2024-12-15 01:33:35,305 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@710636b0
2024-12-15 01:33:35,319 main DEBUG Took 0.012120 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging
2024-12-15 01:33:35,320 main DEBUG PluginManager 'Core' found 129 plugins
2024-12-15 01:33:35,320 main DEBUG PluginManager 'Level' found 0 plugins
2024-12-15 01:33:35,321 main DEBUG PluginManager 'Lookup' found 16 plugins
2024-12-15 01:33:35,324 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-15 01:33:35,332 main DEBUG PluginManager 'TypeConverter' found 26 plugins
2024-12-15 01:33:35,347 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-15 01:33:35,349 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-15 01:33:35,350 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-15 01:33:35,351 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-15 01:33:35,351 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-15 01:33:35,352 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-15 01:33:35,353 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-15 01:33:35,354 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-15 01:33:35,354 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-15 01:33:35,355 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-15 01:33:35,356 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-15 01:33:35,356 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-15 01:33:35,357 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-15 01:33:35,357 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-15 01:33:35,358 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-15 01:33:35,359 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-15 01:33:35,361 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-15 01:33:35,361 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-15 01:33:35,362 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-15 01:33:35,363 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-15 01:33:35,364 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-15 01:33:35,364 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-15 01:33:35,365 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-15 01:33:35,366 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-15 01:33:35,366 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-15 01:33:35,367 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger].
2024-12-15 01:33:35,369 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-15 01:33:35,371 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin].
2024-12-15 01:33:35,374 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root})
2024-12-15 01:33:35,375 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout].
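The LoggerConfig builders above reflect the per-package levels declared in the log4j2.properties bundled in the hbase-logging tests jar (referenced later in this log as the configuration URI). As an illustration only, and not something this test run does itself, the same levels could be applied programmatically with log4j-core's Configurator; the logger names and levels below are copied from the builder entries above, while the class name TestLogLevels is invented for the sketch.

```java
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.core.config.Configurator;

// Illustrative sketch only: mirrors the levels reported by the LoggerConfig builders above.
// The actual test run loads these from log4j2.properties inside the hbase-logging tests jar.
public final class TestLogLevels {
    public static void apply() {
        Configurator.setRootLevel(Level.INFO); // root logger: INFO, routed to the Console appender
        Configurator.setLevel("org.apache.hadoop", Level.WARN);
        Configurator.setLevel("org.apache.hadoop.hbase", Level.DEBUG);
        Configurator.setLevel("org.apache.zookeeper", Level.ERROR);
        Configurator.setLevel("org.apache.hadoop.metrics2.impl.MetricsConfig", Level.WARN);
        Configurator.setLevel("org.apache.hadoop.hbase.regionserver.RSRpcServices", Level.DEBUG);
        Configurator.setLevel("org.apache.hbase.thirdparty.io.netty.channel", Level.DEBUG);
    }
}
```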
2024-12-15 01:33:35,377 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null")
2024-12-15 01:33:35,378 main DEBUG PluginManager 'Converter' found 47 plugins
2024-12-15 01:33:35,390 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender].
2024-12-15 01:33:35,394 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={})
2024-12-15 01:33:35,396 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR
2024-12-15 01:33:35,397 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin].
2024-12-15 01:33:35,397 main DEBUG createAppenders(={Console})
2024-12-15 01:33:35,398 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@710636b0 initialized
2024-12-15 01:33:35,399 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@710636b0
2024-12-15 01:33:35,399 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@710636b0 OK.
2024-12-15 01:33:35,400 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1
2024-12-15 01:33:35,400 main DEBUG OutputStream closed
2024-12-15 01:33:35,401 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true
2024-12-15 01:33:35,401 main DEBUG Appender DefaultConsole-1 stopped with status true
2024-12-15 01:33:35,401 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@618425b5 OK
2024-12-15 01:33:35,560 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6
2024-12-15 01:33:35,563 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger
2024-12-15 01:33:35,565 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector
2024-12-15 01:33:35,567 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=
2024-12-15 01:33:35,568 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory
2024-12-15 01:33:35,568 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter
2024-12-15 01:33:35,569 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper
2024-12-15 01:33:35,569 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j
2024-12-15 01:33:35,570 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl
2024-12-15 01:33:35,570 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans
2024-12-15 01:33:35,571 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase
2024-12-15 01:33:35,575 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop
2024-12-15 01:33:35,576 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers
2024-12-15 01:33:35,576 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices
2024-12-15 01:33:35,577 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig
2024-12-15 01:33:35,577 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel
2024-12-15 01:33:35,577 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore
2024-12-15 01:33:35,579 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console
2024-12-15 01:33:35,594 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-12-15 01:33:35,594 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-logging/target/hbase-logging-2.7.0-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@41e68d87) with optional ClassLoader: null
2024-12-15 01:33:35,595 main DEBUG Shutdown hook enabled. Registering a new one.
2024-12-15 01:33:35,596 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@41e68d87] started OK.
2024-12-15T01:33:35,625 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestExportSnapshot timeout: 13 mins
2024-12-15 01:33:35,629 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED)
2024-12-15 01:33:35,631 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-12-15T01:33:36,035 DEBUG [main {}] hbase.HBaseTestingUtility(348): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8
2024-12-15T01:33:36,036 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestSecureExportSnapshot timeout: 13 mins
2024-12-15T01:33:36,082 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
2024-12-15T01:33:36,371 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-12-15T01:33:36,372 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10, deleteOnExit=true
2024-12-15T01:33:36,372 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS
2024-12-15T01:33:36,373 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/test.cache.data in system properties and HBase conf
2024-12-15T01:33:36,374 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/hadoop.tmp.dir in system properties and HBase conf
2024-12-15T01:33:36,374 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/hadoop.log.dir in system properties and HBase conf
2024-12-15T01:33:36,375 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/mapreduce.cluster.local.dir in system properties and HBase conf
2024-12-15T01:33:36,376 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-12-15T01:33:36,376 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF
2024-12-15T01:33:36,483 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-12-15T01:33:36,489 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-12-15T01:33:36,500 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-12-15T01:33:36,502 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-12-15T01:33:36,504 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-15T01:33:36,505 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-12-15T01:33:36,506 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-12-15T01:33:36,508 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-15T01:33:36,510 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-15T01:33:36,510 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-12-15T01:33:36,511 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/nfs.dump.dir in system properties and HBase conf
2024-12-15T01:33:36,511 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/java.io.tmpdir in system properties and HBase conf
2024-12-15T01:33:36,512 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-15T01:33:36,512 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-12-15T01:33:36,513 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-12-15T01:33:37,804 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties
2024-12-15T01:33:37,886 INFO [Time-limited test {}] log.Log(170): Logging initialized @3537ms to org.eclipse.jetty.util.log.Slf4jLog
2024-12-15T01:33:37,969 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-15T01:33:38,048 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-15T01:33:38,099 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-15T01:33:38,099 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-15T01:33:38,101 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-15T01:33:38,122 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-15T01:33:38,126 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@304604cb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/hadoop.log.dir/,AVAILABLE}
2024-12-15T01:33:38,127 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4ad95383{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-15T01:33:38,441 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@275c2418{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/java.io.tmpdir/jetty-localhost-35059-hadoop-hdfs-3_4_1-tests_jar-_-any-6424919106139990122/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-15T01:33:38,453 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2847eba1{HTTP/1.1, (http/1.1)}{localhost:35059}
2024-12-15T01:33:38,453 INFO [Time-limited test {}] server.Server(415): Started @4106ms
2024-12-15T01:33:38,978 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-15T01:33:38,985 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-15T01:33:38,986 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-15T01:33:38,986 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-15T01:33:38,987 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-15T01:33:38,988 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@57ea78fb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/hadoop.log.dir/,AVAILABLE}
2024-12-15T01:33:38,988 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1b69292c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-15T01:33:39,093 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5fab760a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/java.io.tmpdir/jetty-localhost-41125-hadoop-hdfs-3_4_1-tests_jar-_-any-1848929380203702401/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-15T01:33:39,094 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@120fd64b{HTTP/1.1, (http/1.1)}{localhost:41125}
2024-12-15T01:33:39,094 INFO [Time-limited test {}] server.Server(415): Started @4747ms
2024-12-15T01:33:39,171 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-15T01:33:39,303 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-15T01:33:39,310 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-15T01:33:39,315 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-15T01:33:39,315 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-15T01:33:39,316 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-15T01:33:39,317 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@aead573{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/hadoop.log.dir/,AVAILABLE}
2024-12-15T01:33:39,318 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@18863edc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-15T01:33:39,428 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@8add7a8{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/java.io.tmpdir/jetty-localhost-36059-hadoop-hdfs-3_4_1-tests_jar-_-any-5952347981089979104/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-15T01:33:39,429 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@423c535d{HTTP/1.1, (http/1.1)}{localhost:36059}
2024-12-15T01:33:39,429 INFO [Time-limited test {}] server.Server(415): Started @5081ms
2024-12-15T01:33:39,432 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-15T01:33:39,494 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-15T01:33:39,501 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-15T01:33:39,505 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-15T01:33:39,505 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-15T01:33:39,505 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-15T01:33:39,508 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@689e564d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/hadoop.log.dir/,AVAILABLE}
2024-12-15T01:33:39,508 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4bfb305e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-15T01:33:39,621 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@321bdc95{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/java.io.tmpdir/jetty-localhost-34737-hadoop-hdfs-3_4_1-tests_jar-_-any-8859879543366029544/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-15T01:33:39,622 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@27188009{HTTP/1.1, (http/1.1)}{localhost:34737}
2024-12-15T01:33:39,622 INFO [Time-limited test {}] server.Server(415): Started @5274ms
2024-12-15T01:33:39,625 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
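The StartMiniClusterOption printed earlier (numMasters=1, numRegionServers=3, numDataNodes=3, numZkServers=1) is normally built in the test's class-level setup before the NameNode and DataNode web servers above come up. The sketch below is an assumed outline of that scaffolding using the HBaseTestingUtility API, not code copied from TestExportSnapshot; the class name MiniClusterSketch and the method names are invented for illustration.

```java
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.StartMiniClusterOption;

// Assumed test scaffolding: starts a minicluster matching the StartMiniClusterOption logged above.
public class MiniClusterSketch {
    private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

    public static void setUpCluster() throws Exception {
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)        // one HMaster, as logged
            .numRegionServers(3)  // three regionservers (bound later on ports 42663, 35281, 39381)
            .numDataNodes(3)      // three HDFS datanodes behind the Jetty contexts above
            .numZkServers(1)      // one MiniZooKeeperCluster member
            .build();
        TEST_UTIL.startMiniCluster(option); // brings up DFS, ZooKeeper, the master and the regionservers
    }

    public static void tearDownCluster() throws Exception {
        TEST_UTIL.shutdownMiniCluster();
    }
}
```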
2024-12-15T01:33:40,589 WARN [Thread-123 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data1/current/BP-747981782-172.17.0.2-1734226417326/current, will proceed with Du for space computation calculation,
2024-12-15T01:33:40,589 WARN [Thread-125 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data2/current/BP-747981782-172.17.0.2-1734226417326/current, will proceed with Du for space computation calculation,
2024-12-15T01:33:40,589 WARN [Thread-124 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data4/current/BP-747981782-172.17.0.2-1734226417326/current, will proceed with Du for space computation calculation,
2024-12-15T01:33:40,589 WARN [Thread-122 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data3/current/BP-747981782-172.17.0.2-1734226417326/current, will proceed with Du for space computation calculation,
2024-12-15T01:33:40,636 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-15T01:33:40,636 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-15T01:33:40,681 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbc7f277ebc07f1de with lease ID 0x7adaa574b5e28442: Processing first storage report for DS-ed77dea0-c2d8-48b0-b5bf-eaf2aed0c4fa from datanode DatanodeRegistration(127.0.0.1:38403, datanodeUuid=84f809e7-5568-47ff-ae29-cccdf581a3d9, infoPort=33539, infoSecurePort=0, ipcPort=44765, storageInfo=lv=-57;cid=testClusterID;nsid=360697808;c=1734226417326)
2024-12-15T01:33:40,682 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbc7f277ebc07f1de with lease ID 0x7adaa574b5e28442: from storage DS-ed77dea0-c2d8-48b0-b5bf-eaf2aed0c4fa node DatanodeRegistration(127.0.0.1:38403, datanodeUuid=84f809e7-5568-47ff-ae29-cccdf581a3d9, infoPort=33539, infoSecurePort=0, ipcPort=44765, storageInfo=lv=-57;cid=testClusterID;nsid=360697808;c=1734226417326), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0
2024-12-15T01:33:40,682 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x115da8b9f54be6f5 with lease ID 0x7adaa574b5e28443: Processing first storage report for DS-a619e4b2-7115-404b-8a2a-e1ff218d0fdc from datanode DatanodeRegistration(127.0.0.1:38745, datanodeUuid=dc094a0d-88b6-4c61-9536-9650187be222, infoPort=45299, infoSecurePort=0, ipcPort=42339, storageInfo=lv=-57;cid=testClusterID;nsid=360697808;c=1734226417326)
2024-12-15T01:33:40,683 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x115da8b9f54be6f5 with lease ID 0x7adaa574b5e28443: from storage DS-a619e4b2-7115-404b-8a2a-e1ff218d0fdc node DatanodeRegistration(127.0.0.1:38745, datanodeUuid=dc094a0d-88b6-4c61-9536-9650187be222, infoPort=45299, infoSecurePort=0, ipcPort=42339, storageInfo=lv=-57;cid=testClusterID;nsid=360697808;c=1734226417326), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-15T01:33:40,683 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbc7f277ebc07f1de with lease ID 0x7adaa574b5e28442: Processing first storage report for DS-960a9e5e-ed59-411a-abfd-a13985d3b954 from datanode DatanodeRegistration(127.0.0.1:38403, datanodeUuid=84f809e7-5568-47ff-ae29-cccdf581a3d9, infoPort=33539, infoSecurePort=0, ipcPort=44765, storageInfo=lv=-57;cid=testClusterID;nsid=360697808;c=1734226417326)
2024-12-15T01:33:40,683 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbc7f277ebc07f1de with lease ID 0x7adaa574b5e28442: from storage DS-960a9e5e-ed59-411a-abfd-a13985d3b954 node DatanodeRegistration(127.0.0.1:38403, datanodeUuid=84f809e7-5568-47ff-ae29-cccdf581a3d9, infoPort=33539, infoSecurePort=0, ipcPort=44765, storageInfo=lv=-57;cid=testClusterID;nsid=360697808;c=1734226417326), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0
2024-12-15T01:33:40,683 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x115da8b9f54be6f5 with lease ID 0x7adaa574b5e28443: Processing first storage report for DS-f9d6e7e4-3643-4b17-9258-350c982a6f39 from datanode DatanodeRegistration(127.0.0.1:38745, datanodeUuid=dc094a0d-88b6-4c61-9536-9650187be222, infoPort=45299, infoSecurePort=0, ipcPort=42339, storageInfo=lv=-57;cid=testClusterID;nsid=360697808;c=1734226417326)
2024-12-15T01:33:40,683 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x115da8b9f54be6f5 with lease ID 0x7adaa574b5e28443: from storage DS-f9d6e7e4-3643-4b17-9258-350c982a6f39 node DatanodeRegistration(127.0.0.1:38745, datanodeUuid=dc094a0d-88b6-4c61-9536-9650187be222, infoPort=45299, infoSecurePort=0, ipcPort=42339, storageInfo=lv=-57;cid=testClusterID;nsid=360697808;c=1734226417326), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-15T01:33:40,751 WARN [Thread-143 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data6/current/BP-747981782-172.17.0.2-1734226417326/current, will proceed with Du for space computation calculation,
2024-12-15T01:33:40,751 WARN [Thread-142 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data5/current/BP-747981782-172.17.0.2-1734226417326/current, will proceed with Du for space computation calculation,
2024-12-15T01:33:40,772 WARN [Thread-103 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-15T01:33:40,776 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5ad8cad18b32ed51 with lease ID 0x7adaa574b5e28444: Processing first storage report for DS-104a2fd6-e7a1-43fc-ac07-1d9598d0600c from datanode DatanodeRegistration(127.0.0.1:46345, datanodeUuid=db4c7c2f-79b5-4215-8052-98ca3651da77, infoPort=45149, infoSecurePort=0, ipcPort=41527, storageInfo=lv=-57;cid=testClusterID;nsid=360697808;c=1734226417326)
2024-12-15T01:33:40,776 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5ad8cad18b32ed51 with lease ID 0x7adaa574b5e28444: from storage DS-104a2fd6-e7a1-43fc-ac07-1d9598d0600c node DatanodeRegistration(127.0.0.1:46345, datanodeUuid=db4c7c2f-79b5-4215-8052-98ca3651da77, infoPort=45149, infoSecurePort=0, ipcPort=41527, storageInfo=lv=-57;cid=testClusterID;nsid=360697808;c=1734226417326), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-15T01:33:40,777 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5ad8cad18b32ed51 with lease ID 0x7adaa574b5e28444: Processing first storage report for DS-d25ed95b-394d-4801-829b-fe157ba21584 from datanode DatanodeRegistration(127.0.0.1:46345, datanodeUuid=db4c7c2f-79b5-4215-8052-98ca3651da77, infoPort=45149, infoSecurePort=0, ipcPort=41527, storageInfo=lv=-57;cid=testClusterID;nsid=360697808;c=1734226417326)
2024-12-15T01:33:40,777 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5ad8cad18b32ed51 with lease ID 0x7adaa574b5e28444: from storage DS-d25ed95b-394d-4801-829b-fe157ba21584 node DatanodeRegistration(127.0.0.1:46345, datanodeUuid=db4c7c2f-79b5-4215-8052-98ca3651da77, infoPort=45149, infoSecurePort=0, ipcPort=41527, storageInfo=lv=-57;cid=testClusterID;nsid=360697808;c=1734226417326), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-15T01:33:40,819 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8
2024-12-15T01:33:40,897 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/zookeeper_0, clientPort=64248, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-12-15T01:33:40,909 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=64248
2024-12-15T01:33:40,920 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-15T01:33:40,922 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-15T01:33:41,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741825_1001 (size=7)
2024-12-15T01:33:41,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741825_1001 (size=7)
2024-12-15T01:33:41,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741825_1001 (size=7)
2024-12-15T01:33:41,650 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b with version=8
2024-12-15T01:33:41,651 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1460): Setting hbase.fs.tmp.dir to hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/hbase-staging
2024-12-15T01:33:41,782 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16
2024-12-15T01:33:42,087 INFO [Time-limited test {}] client.ConnectionUtils(129): master/66d575e28c61:0 server-side Connection retries=45
2024-12-15T01:33:42,106 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-15T01:33:42,107 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-15T01:33:42,107 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-15T01:33:42,107 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-15T01:33:42,107 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-15T01:33:42,230 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-15T01:33:42,297 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl
2024-12-15T01:33:42,308 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout
2024-12-15T01:33:42,313 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-15T01:33:42,344 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 49582 (auto-detected)
2024-12-15T01:33:42,346 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected)
2024-12-15T01:33:42,370 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:40789
2024-12-15T01:33:42,382 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-15T01:33:42,385 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-15T01:33:42,403 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:40789 connecting to ZooKeeper ensemble=127.0.0.1:64248
2024-12-15T01:33:42,521 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:407890x0, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-15T01:33:42,525 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:40789-0x1002752410c0000 connected
2024-12-15T01:33:42,613 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-15T01:33:42,616 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-15T01:33:42,637 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-15T01:33:42,642 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40789
2024-12-15T01:33:42,643 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40789
2024-12-15T01:33:42,643 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40789
2024-12-15T01:33:42,646 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40789
2024-12-15T01:33:42,647 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40789
2024-12-15T01:33:42,654 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b, hbase.cluster.distributed=false
2024-12-15T01:33:42,716 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/66d575e28c61:0 server-side Connection retries=45
2024-12-15T01:33:42,716 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-15T01:33:42,716 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-15T01:33:42,717 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-15T01:33:42,717 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-15T01:33:42,717 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-15T01:33:42,719 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-15T01:33:42,722 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-15T01:33:42,723 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:42663
2024-12-15T01:33:42,725 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-12-15T01:33:42,736 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-12-15T01:33:42,737 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-15T01:33:42,740 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-15T01:33:42,744 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:42663 connecting to ZooKeeper ensemble=127.0.0.1:64248
2024-12-15T01:33:42,752 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:426630x0, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-15T01:33:42,753 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:426630x0, quorum=127.0.0.1:64248, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-15T01:33:42,755 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:426630x0, quorum=127.0.0.1:64248, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-15T01:33:42,756 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:426630x0, quorum=127.0.0.1:64248, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-15T01:33:42,759 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42663-0x1002752410c0001 connected
2024-12-15T01:33:42,763 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42663
2024-12-15T01:33:42,764 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42663
2024-12-15T01:33:42,764 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42663
2024-12-15T01:33:42,771 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42663
2024-12-15T01:33:42,775 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42663
2024-12-15T01:33:42,800 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/66d575e28c61:0 server-side Connection retries=45
2024-12-15T01:33:42,800 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-15T01:33:42,800 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-15T01:33:42,801 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-15T01:33:42,801 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-15T01:33:42,801 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-15T01:33:42,802 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-15T01:33:42,802 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-15T01:33:42,803 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:35281
2024-12-15T01:33:42,804 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-12-15T01:33:42,805 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-12-15T01:33:42,806 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-15T01:33:42,809 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-15T01:33:42,812 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:35281 connecting to ZooKeeper ensemble=127.0.0.1:64248
2024-12-15T01:33:42,840 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:352810x0, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-15T01:33:42,841 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:352810x0, quorum=127.0.0.1:64248, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-15T01:33:42,841 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35281-0x1002752410c0002 connected
2024-12-15T01:33:42,843 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35281-0x1002752410c0002, quorum=127.0.0.1:64248, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-15T01:33:42,844 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35281-0x1002752410c0002, quorum=127.0.0.1:64248, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-15T01:33:42,845 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35281
2024-12-15T01:33:42,846 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35281
2024-12-15T01:33:42,847 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35281
2024-12-15T01:33:42,851 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35281
2024-12-15T01:33:42,852 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35281
2024-12-15T01:33:42,873 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/66d575e28c61:0 server-side Connection retries=45
2024-12-15T01:33:42,873 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-15T01:33:42,873 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-15T01:33:42,874 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-15T01:33:42,874 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-15T01:33:42,874 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-15T01:33:42,874 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-15T01:33:42,874 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-15T01:33:42,875 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:39381
2024-12-15T01:33:42,876 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-12-15T01:33:42,879 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-12-15T01:33:42,880 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-15T01:33:42,884 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-15T01:33:42,888 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:39381 connecting to ZooKeeper ensemble=127.0.0.1:64248
2024-12-15T01:33:42,902 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:393810x0, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-15T01:33:42,903 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:393810x0, quorum=127.0.0.1:64248, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-15T01:33:42,903 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39381-0x1002752410c0003 connected
2024-12-15T01:33:42,904 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39381-0x1002752410c0003, quorum=127.0.0.1:64248, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-15T01:33:42,905 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39381-0x1002752410c0003, quorum=127.0.0.1:64248, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-15T01:33:42,909 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39381
2024-12-15T01:33:42,910 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39381
2024-12-15T01:33:42,911 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39381
2024-12-15T01:33:42,916 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39381
2024-12-15T01:33:42,916 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39381
2024-12-15T01:33:42,922 INFO [master/66d575e28c61:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/66d575e28c61,40789,1734226421776
2024-12-15T01:33:42,937 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35281-0x1002752410c0002, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-15T01:33:42,937 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39381-0x1002752410c0003, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-15T01:33:42,937 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42663-0x1002752410c0001, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-15T01:33:42,937 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-15T01:33:42,938 DEBUG [M:0;66d575e28c61:40789 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;66d575e28c61:40789
2024-12-15T01:33:42,940 DEBUG [master/66d575e28c61:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/66d575e28c61,40789,1734226421776
2024-12-15T01:33:42,969 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39381-0x1002752410c0003, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-12-15T01:33:42,969 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42663-0x1002752410c0001, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-12-15T01:33:42,969 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35281-0x1002752410c0002, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-12-15T01:33:42,969 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-12-15T01:33:42,969 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42663-0x1002752410c0001, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-15T01:33:42,969 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39381-0x1002752410c0003, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-15T01:33:42,969 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35281-0x1002752410c0002, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-15T01:33:42,969 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected,
path=/hbase 2024-12-15T01:33:42,971 DEBUG [master/66d575e28c61:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-15T01:33:42,972 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-15T01:33:42,972 INFO [master/66d575e28c61:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/66d575e28c61,40789,1734226421776 from backup master directory 2024-12-15T01:33:42,984 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/66d575e28c61,40789,1734226421776 2024-12-15T01:33:42,984 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39381-0x1002752410c0003, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-15T01:33:42,984 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42663-0x1002752410c0001, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-15T01:33:42,984 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35281-0x1002752410c0002, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-15T01:33:42,984 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-15T01:33:42,986 WARN [master/66d575e28c61:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-15T01:33:42,986 INFO [master/66d575e28c61:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=66d575e28c61,40789,1734226421776 2024-12-15T01:33:42,989 INFO [master/66d575e28c61:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-15T01:33:42,991 INFO [master/66d575e28c61:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-15T01:33:43,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741826_1002 (size=42) 2024-12-15T01:33:43,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741826_1002 (size=42) 2024-12-15T01:33:43,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741826_1002 (size=42) 2024-12-15T01:33:43,096 DEBUG [master/66d575e28c61:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/hbase.id with ID: dbf0be15-6229-4f03-afc1-94557509e2a2 2024-12-15T01:33:43,141 INFO [master/66d575e28c61:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-15T01:33:43,186 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42663-0x1002752410c0001, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-15T01:33:43,186 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35281-0x1002752410c0002, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-15T01:33:43,186 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39381-0x1002752410c0003, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-15T01:33:43,186 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-15T01:33:43,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741827_1003 (size=196) 2024-12-15T01:33:43,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741827_1003 (size=196) 2024-12-15T01:33:43,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741827_1003 (size=196) 2024-12-15T01:33:43,238 INFO [master/66d575e28c61:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', 
REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-15T01:33:43,240 INFO [master/66d575e28c61:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-15T01:33:43,267 DEBUG [master/66d575e28c61:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at java.lang.Class.forName0(Native Method) ~[?:?] at java.lang.Class.forName(Class.java:375) ~[?:?] at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:147) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:160) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProvider(WALFactory.java:200) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:232) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:207) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:402) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:973) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2470) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:590) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) ~[classes/:?] 
at org.apache.hadoop.hbase.master.HMaster.lambda$run$1(HMaster.java:587) ~[classes/:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T01:33:43,272 INFO [master/66d575e28c61:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-15T01:33:43,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741828_1004 (size=1189) 2024-12-15T01:33:43,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741828_1004 (size=1189) 2024-12-15T01:33:43,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741828_1004 (size=1189) 2024-12-15T01:33:43,339 INFO [master/66d575e28c61:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/MasterData/data/master/store 2024-12-15T01:33:43,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741829_1005 (size=34) 2024-12-15T01:33:43,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741829_1005 (size=34) 2024-12-15T01:33:43,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741829_1005 (size=34) 2024-12-15T01:33:43,369 INFO [master/66d575e28c61:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 
2024-12-15T01:33:43,369 DEBUG [master/66d575e28c61:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T01:33:43,370 DEBUG [master/66d575e28c61:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-15T01:33:43,370 INFO [master/66d575e28c61:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-15T01:33:43,371 DEBUG [master/66d575e28c61:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-15T01:33:43,371 DEBUG [master/66d575e28c61:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-15T01:33:43,371 DEBUG [master/66d575e28c61:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-15T01:33:43,371 INFO [master/66d575e28c61:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-15T01:33:43,371 DEBUG [master/66d575e28c61:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-15T01:33:43,373 WARN [master/66d575e28c61:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/MasterData/data/master/store/.initializing 2024-12-15T01:33:43,373 DEBUG [master/66d575e28c61:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/MasterData/WALs/66d575e28c61,40789,1734226421776 2024-12-15T01:33:43,380 INFO [master/66d575e28c61:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-15T01:33:43,396 INFO [master/66d575e28c61:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=66d575e28c61%2C40789%2C1734226421776, suffix=, logDir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/MasterData/WALs/66d575e28c61,40789,1734226421776, archiveDir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/MasterData/oldWALs, maxLogs=10 2024-12-15T01:33:43,423 DEBUG [master/66d575e28c61:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/MasterData/WALs/66d575e28c61,40789,1734226421776/66d575e28c61%2C40789%2C1734226421776.1734226423403, exclude list is [], retry=0 2024-12-15T01:33:43,441 DEBUG [RS-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38403,DS-ed77dea0-c2d8-48b0-b5bf-eaf2aed0c4fa,DISK] 2024-12-15T01:33:43,441 DEBUG [RS-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = 
DatanodeInfoWithStorage[127.0.0.1:38745,DS-a619e4b2-7115-404b-8a2a-e1ff218d0fdc,DISK] 2024-12-15T01:33:43,441 DEBUG [RS-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46345,DS-104a2fd6-e7a1-43fc-ac07-1d9598d0600c,DISK] 2024-12-15T01:33:43,445 DEBUG [RS-EventLoopGroup-5-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-12-15T01:33:43,488 INFO [master/66d575e28c61:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/MasterData/WALs/66d575e28c61,40789,1734226421776/66d575e28c61%2C40789%2C1734226421776.1734226423403 2024-12-15T01:33:43,489 DEBUG [master/66d575e28c61:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:33539:33539),(127.0.0.1/127.0.0.1:45149:45149),(127.0.0.1/127.0.0.1:45299:45299)] 2024-12-15T01:33:43,490 DEBUG [master/66d575e28c61:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-15T01:33:43,491 DEBUG [master/66d575e28c61:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T01:33:43,496 DEBUG [master/66d575e28c61:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-15T01:33:43,498 DEBUG [master/66d575e28c61:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-15T01:33:43,547 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-15T01:33:43,570 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-15T01:33:43,575 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T01:33:43,579 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-15T01:33:43,579 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-15T01:33:43,584 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-15T01:33:43,584 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T01:33:43,586 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-15T01:33:43,586 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-15T01:33:43,590 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-15T01:33:43,590 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T01:33:43,592 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-15T01:33:43,592 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-15T01:33:43,596 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-15T01:33:43,597 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T01:33:43,598 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-15T01:33:43,603 DEBUG [master/66d575e28c61:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-15T01:33:43,605 DEBUG [master/66d575e28c61:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-15T01:33:43,617 DEBUG [master/66d575e28c61:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-12-15T01:33:43,631 DEBUG [master/66d575e28c61:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-15T01:33:43,637 DEBUG [master/66d575e28c61:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-15T01:33:43,639 INFO [master/66d575e28c61:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71179247, jitterRate=0.060653433203697205}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-15T01:33:43,644 DEBUG [master/66d575e28c61:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-15T01:33:43,645 INFO [master/66d575e28c61:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-15T01:33:43,682 DEBUG [master/66d575e28c61:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72f59972, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T01:33:43,718 INFO [master/66d575e28c61:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 2024-12-15T01:33:43,729 INFO [master/66d575e28c61:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-15T01:33:43,729 INFO [master/66d575e28c61:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-15T01:33:43,731 INFO [master/66d575e28c61:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-15T01:33:43,732 INFO [master/66d575e28c61:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 1 msec 2024-12-15T01:33:43,737 INFO [master/66d575e28c61:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 4 msec 2024-12-15T01:33:43,737 INFO [master/66d575e28c61:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-15T01:33:43,765 INFO [master/66d575e28c61:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
2024-12-15T01:33:43,780 DEBUG [master/66d575e28c61:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-15T01:33:43,785 DEBUG [master/66d575e28c61:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-12-15T01:33:43,789 INFO [master/66d575e28c61:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-15T01:33:43,791 DEBUG [master/66d575e28c61:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-15T01:33:43,852 DEBUG [master/66d575e28c61:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-12-15T01:33:43,855 INFO [master/66d575e28c61:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-15T01:33:43,860 DEBUG [master/66d575e28c61:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-15T01:33:43,869 DEBUG [master/66d575e28c61:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-12-15T01:33:43,871 DEBUG [master/66d575e28c61:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-15T01:33:43,877 DEBUG [master/66d575e28c61:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-12-15T01:33:43,887 DEBUG [master/66d575e28c61:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-15T01:33:43,894 DEBUG [master/66d575e28c61:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-15T01:33:43,902 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-15T01:33:43,902 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35281-0x1002752410c0002, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-15T01:33:43,903 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-15T01:33:43,903 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39381-0x1002752410c0003, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, 
path=/hbase/running 2024-12-15T01:33:43,903 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35281-0x1002752410c0002, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-15T01:33:43,902 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42663-0x1002752410c0001, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-15T01:33:43,903 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42663-0x1002752410c0001, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-15T01:33:43,903 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39381-0x1002752410c0003, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-15T01:33:43,905 INFO [master/66d575e28c61:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=66d575e28c61,40789,1734226421776, sessionid=0x1002752410c0000, setting cluster-up flag (Was=false) 2024-12-15T01:33:43,936 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-15T01:33:43,936 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35281-0x1002752410c0002, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-15T01:33:43,936 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39381-0x1002752410c0003, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-15T01:33:43,936 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42663-0x1002752410c0001, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-15T01:33:43,961 DEBUG [master/66d575e28c61:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-15T01:33:43,963 DEBUG [master/66d575e28c61:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=66d575e28c61,40789,1734226421776 2024-12-15T01:33:43,985 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39381-0x1002752410c0003, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-15T01:33:43,985 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42663-0x1002752410c0001, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-15T01:33:43,985 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35281-0x1002752410c0002, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-15T01:33:43,985 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-15T01:33:44,011 DEBUG [master/66d575e28c61:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-15T01:33:44,012 DEBUG [master/66d575e28c61:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=66d575e28c61,40789,1734226421776 2024-12-15T01:33:44,051 DEBUG [RS:1;66d575e28c61:35281 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;66d575e28c61:35281 2024-12-15T01:33:44,064 DEBUG [RS:2;66d575e28c61:39381 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;66d575e28c61:39381 2024-12-15T01:33:44,064 INFO [RS:1;66d575e28c61:35281 {}] regionserver.HRegionServer(1008): ClusterId : dbf0be15-6229-4f03-afc1-94557509e2a2 2024-12-15T01:33:44,068 DEBUG [RS:1;66d575e28c61:35281 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-15T01:33:44,070 DEBUG [RS:0;66d575e28c61:42663 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;66d575e28c61:42663 2024-12-15T01:33:44,071 INFO [RS:2;66d575e28c61:39381 {}] regionserver.HRegionServer(1008): ClusterId : dbf0be15-6229-4f03-afc1-94557509e2a2 2024-12-15T01:33:44,071 DEBUG [RS:2;66d575e28c61:39381 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-15T01:33:44,086 INFO [RS:0;66d575e28c61:42663 {}] regionserver.HRegionServer(1008): ClusterId : dbf0be15-6229-4f03-afc1-94557509e2a2 2024-12-15T01:33:44,086 DEBUG [RS:0;66d575e28c61:42663 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-15T01:33:44,095 DEBUG [RS:1;66d575e28c61:35281 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-15T01:33:44,096 DEBUG [RS:1;66d575e28c61:35281 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-15T01:33:44,102 DEBUG [RS:2;66d575e28c61:39381 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-15T01:33:44,102 DEBUG [RS:2;66d575e28c61:39381 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-15T01:33:44,103 DEBUG [RS:0;66d575e28c61:42663 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-15T01:33:44,103 DEBUG [RS:0;66d575e28c61:42663 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-15T01:33:44,112 DEBUG [RS:1;66d575e28c61:35281 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-15T01:33:44,112 DEBUG [RS:1;66d575e28c61:35281 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7127afa5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T01:33:44,117 DEBUG [RS:1;66d575e28c61:35281 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@296a1d8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, 
fallbackAllowed=true, bind address=66d575e28c61/172.17.0.2:0 2024-12-15T01:33:44,120 DEBUG [RS:2;66d575e28c61:39381 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-15T01:33:44,120 DEBUG [RS:2;66d575e28c61:39381 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6de75ef1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T01:33:44,121 DEBUG [RS:0;66d575e28c61:42663 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-15T01:33:44,121 INFO [RS:1;66d575e28c61:35281 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-15T01:33:44,121 INFO [RS:1;66d575e28c61:35281 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-15T01:33:44,122 DEBUG [RS:0;66d575e28c61:42663 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4c530ae7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T01:33:44,125 DEBUG [RS:2;66d575e28c61:39381 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5a2913f5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=66d575e28c61/172.17.0.2:0 2024-12-15T01:33:44,125 INFO [RS:2;66d575e28c61:39381 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-15T01:33:44,125 INFO [RS:2;66d575e28c61:39381 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-15T01:33:44,126 DEBUG [RS:0;66d575e28c61:42663 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6ce708f8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=66d575e28c61/172.17.0.2:0 2024-12-15T01:33:44,126 INFO [RS:0;66d575e28c61:42663 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-15T01:33:44,126 INFO [RS:0;66d575e28c61:42663 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-15T01:33:44,137 DEBUG [master/66d575e28c61:0:becomeActiveMaster {}] master.HMaster(3390): Registered master coprocessor service: service=AccessControlService 2024-12-15T01:33:44,137 DEBUG [RS:2;66d575e28c61:39381 {}] regionserver.HRegionServer(900): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-12-15T01:33:44,139 INFO [master/66d575e28c61:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-15T01:33:44,139 INFO [RS:2;66d575e28c61:39381 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-15T01:33:44,139 DEBUG [RS:2;66d575e28c61:39381 {}] regionserver.HRegionServer(1090): About to register with Master. 
2024-12-15T01:33:44,139 DEBUG [RS:0;66d575e28c61:42663 {}] regionserver.HRegionServer(900): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-12-15T01:33:44,139 INFO [master/66d575e28c61:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver loaded, priority=536870912. 2024-12-15T01:33:44,139 INFO [RS:0;66d575e28c61:42663 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-15T01:33:44,139 DEBUG [RS:0;66d575e28c61:42663 {}] regionserver.HRegionServer(1090): About to register with Master. 2024-12-15T01:33:44,140 DEBUG [RS:1;66d575e28c61:35281 {}] regionserver.HRegionServer(900): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-12-15T01:33:44,140 INFO [RS:1;66d575e28c61:35281 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-15T01:33:44,140 DEBUG [RS:1;66d575e28c61:35281 {}] regionserver.HRegionServer(1090): About to register with Master. 2024-12-15T01:33:44,142 INFO [RS:2;66d575e28c61:39381 {}] regionserver.HRegionServer(3073): reportForDuty to master=66d575e28c61,40789,1734226421776 with isa=66d575e28c61/172.17.0.2:39381, startcode=1734226422872 2024-12-15T01:33:44,142 INFO [RS:0;66d575e28c61:42663 {}] regionserver.HRegionServer(3073): reportForDuty to master=66d575e28c61,40789,1734226421776 with isa=66d575e28c61/172.17.0.2:42663, startcode=1734226422715 2024-12-15T01:33:44,143 INFO [RS:1;66d575e28c61:35281 {}] regionserver.HRegionServer(3073): reportForDuty to master=66d575e28c61,40789,1734226421776 with isa=66d575e28c61/172.17.0.2:35281, startcode=1734226422799 2024-12-15T01:33:44,162 DEBUG [RS:0;66d575e28c61:42663 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-15T01:33:44,162 DEBUG [RS:2;66d575e28c61:39381 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-15T01:33:44,165 DEBUG [RS:1;66d575e28c61:35281 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-15T01:33:44,204 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34483, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-15T01:33:44,205 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35077, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-15T01:33:44,204 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48651, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-15T01:33:44,206 DEBUG [master/66d575e28c61:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta 2024-12-15T01:33:44,211 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40789 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3280) 
~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:593) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T01:33:44,213 INFO [master/66d575e28c61:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-12-15T01:33:44,217 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40789 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3280) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:593) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T01:33:44,218 INFO [master/66d575e28c61:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-15T01:33:44,218 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40789 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3280) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:593) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T01:33:44,225 DEBUG [master/66d575e28c61:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 66d575e28c61,40789,1734226421776 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-15T01:33:44,230 DEBUG [master/66d575e28c61:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/66d575e28c61:0, corePoolSize=5, maxPoolSize=5 2024-12-15T01:33:44,230 DEBUG [master/66d575e28c61:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/66d575e28c61:0, corePoolSize=5, maxPoolSize=5 2024-12-15T01:33:44,230 DEBUG [master/66d575e28c61:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/66d575e28c61:0, corePoolSize=5, maxPoolSize=5 2024-12-15T01:33:44,231 DEBUG [master/66d575e28c61:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/66d575e28c61:0, corePoolSize=5, maxPoolSize=5 2024-12-15T01:33:44,231 DEBUG [master/66d575e28c61:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/66d575e28c61:0, corePoolSize=10, maxPoolSize=10 2024-12-15T01:33:44,231 DEBUG [master/66d575e28c61:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/66d575e28c61:0, corePoolSize=1, maxPoolSize=1 2024-12-15T01:33:44,231 DEBUG [master/66d575e28c61:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/66d575e28c61:0, corePoolSize=2, maxPoolSize=2 2024-12-15T01:33:44,231 DEBUG [master/66d575e28c61:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/66d575e28c61:0, corePoolSize=1, maxPoolSize=1 2024-12-15T01:33:44,236 INFO [master/66d575e28c61:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1734226454236 2024-12-15T01:33:44,239 INFO [master/66d575e28c61:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-15T01:33:44,240 INFO [master/66d575e28c61:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-15T01:33:44,241 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-12-15T01:33:44,241 INFO [PEWorker-1 {}] 
procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-12-15T01:33:44,244 INFO [master/66d575e28c61:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-15T01:33:44,245 INFO [master/66d575e28c61:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-15T01:33:44,245 INFO [master/66d575e28c61:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-15T01:33:44,245 INFO [master/66d575e28c61:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-15T01:33:44,245 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T01:33:44,245 DEBUG [RS:2;66d575e28c61:39381 {}] regionserver.HRegionServer(3097): Master is not running yet 2024-12-15T01:33:44,245 DEBUG [RS:0;66d575e28c61:42663 {}] regionserver.HRegionServer(3097): Master is not running yet 2024-12-15T01:33:44,245 DEBUG [RS:1;66d575e28c61:35281 {}] regionserver.HRegionServer(3097): Master is not running yet 2024-12-15T01:33:44,246 WARN [RS:0;66d575e28c61:42663 {}] regionserver.HRegionServer(1099): reportForDuty failed; sleeping 100 ms and then retrying. 2024-12-15T01:33:44,246 WARN [RS:1;66d575e28c61:35281 {}] regionserver.HRegionServer(1099): reportForDuty failed; sleeping 100 ms and then retrying. 2024-12-15T01:33:44,246 WARN [RS:2;66d575e28c61:39381 {}] regionserver.HRegionServer(1099): reportForDuty failed; sleeping 100 ms and then retrying. 2024-12-15T01:33:44,246 INFO [master/66d575e28c61:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-12-15T01:33:44,246 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-15T01:33:44,247 INFO [master/66d575e28c61:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-15T01:33:44,249 INFO [master/66d575e28c61:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-15T01:33:44,250 INFO [master/66d575e28c61:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-15T01:33:44,255 INFO [master/66d575e28c61:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-15T01:33:44,256 INFO [master/66d575e28c61:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-15T01:33:44,259 DEBUG [master/66d575e28c61:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/66d575e28c61:0:becomeActiveMaster-HFileCleaner.large.0-1734226424258,5,FailOnTimeoutGroup] 2024-12-15T01:33:44,263 DEBUG [master/66d575e28c61:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/66d575e28c61:0:becomeActiveMaster-HFileCleaner.small.0-1734226424259,5,FailOnTimeoutGroup] 2024-12-15T01:33:44,263 INFO [master/66d575e28c61:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-15T01:33:44,264 INFO [master/66d575e28c61:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-15T01:33:44,265 INFO [master/66d575e28c61:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-15T01:33:44,265 INFO [master/66d575e28c61:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
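[editor's note] The hbase:meta descriptor printed above is assembled internally by FSTableDescriptors, but the same column-family settings can be written out with the public client API. A minimal sketch for a hypothetical user table whose 'info' family mirrors the values logged above (VERSIONS=3, IN_MEMORY, BLOOMFILTER=ROWCOL, DATA_BLOCK_ENCODING=ROW_INDEX_V1, BLOCKSIZE=8 KB); the table name is an assumption, and hbase:meta itself is never created this way by users.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLikeDescriptorSketch {
        public static void main(String[] args) {
            // 'info' family with the settings shown in the descriptor dump above.
            TableDescriptor demo = TableDescriptorBuilder
                    .newBuilder(TableName.valueOf("demo_meta_like")) // hypothetical table name
                    .setColumnFamily(ColumnFamilyDescriptorBuilder
                            .newBuilder(Bytes.toBytes("info"))
                            .setMaxVersions(3)
                            .setInMemory(true)
                            .setBloomFilterType(BloomType.ROWCOL)
                            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                            .setBlocksize(8 * 1024)
                            .build())
                    .build();
            System.out.println(demo); // prints a descriptor in the same shape as the log line above
        }
    }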
2024-12-15T01:33:44,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741831_1007 (size=1039) 2024-12-15T01:33:44,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741831_1007 (size=1039) 2024-12-15T01:33:44,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741831_1007 (size=1039) 2024-12-15T01:33:44,275 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-12-15T01:33:44,275 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b 2024-12-15T01:33:44,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741832_1008 (size=32) 2024-12-15T01:33:44,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741832_1008 (size=32) 2024-12-15T01:33:44,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741832_1008 (size=32) 2024-12-15T01:33:44,292 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T01:33:44,294 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-15T01:33:44,297 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-15T01:33:44,297 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T01:33:44,298 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-15T01:33:44,299 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-15T01:33:44,301 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-15T01:33:44,301 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T01:33:44,302 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-15T01:33:44,302 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-15T01:33:44,306 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-15T01:33:44,306 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T01:33:44,307 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-15T01:33:44,309 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/hbase/meta/1588230740 2024-12-15T01:33:44,310 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/hbase/meta/1588230740 2024-12-15T01:33:44,312 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-15T01:33:44,314 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-15T01:33:44,318 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-15T01:33:44,319 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64600971, jitterRate=-0.03737051784992218}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-15T01:33:44,321 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-15T01:33:44,321 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-15T01:33:44,321 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-15T01:33:44,321 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-15T01:33:44,321 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-15T01:33:44,321 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-15T01:33:44,323 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-15T01:33:44,323 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-15T01:33:44,325 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-12-15T01:33:44,325 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-12-15T01:33:44,331 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-15T01:33:44,339 INFO [PEWorker-2 {}] 
procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-15T01:33:44,342 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-15T01:33:44,347 INFO [RS:2;66d575e28c61:39381 {}] regionserver.HRegionServer(3073): reportForDuty to master=66d575e28c61,40789,1734226421776 with isa=66d575e28c61/172.17.0.2:39381, startcode=1734226422872 2024-12-15T01:33:44,347 INFO [RS:0;66d575e28c61:42663 {}] regionserver.HRegionServer(3073): reportForDuty to master=66d575e28c61,40789,1734226421776 with isa=66d575e28c61/172.17.0.2:42663, startcode=1734226422715 2024-12-15T01:33:44,347 INFO [RS:1;66d575e28c61:35281 {}] regionserver.HRegionServer(3073): reportForDuty to master=66d575e28c61,40789,1734226421776 with isa=66d575e28c61/172.17.0.2:35281, startcode=1734226422799 2024-12-15T01:33:44,348 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40789 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 66d575e28c61,35281,1734226422799 2024-12-15T01:33:44,350 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40789 {}] master.ServerManager(486): Registering regionserver=66d575e28c61,35281,1734226422799 2024-12-15T01:33:44,358 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40789 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 66d575e28c61,39381,1734226422872 2024-12-15T01:33:44,358 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40789 {}] master.ServerManager(486): Registering regionserver=66d575e28c61,39381,1734226422872 2024-12-15T01:33:44,358 DEBUG [RS:1;66d575e28c61:35281 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b 2024-12-15T01:33:44,358 DEBUG [RS:1;66d575e28c61:35281 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:33329 2024-12-15T01:33:44,358 DEBUG [RS:1;66d575e28c61:35281 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-15T01:33:44,360 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40789 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 66d575e28c61,42663,1734226422715 2024-12-15T01:33:44,361 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40789 {}] master.ServerManager(486): Registering regionserver=66d575e28c61,42663,1734226422715 2024-12-15T01:33:44,361 DEBUG [RS:2;66d575e28c61:39381 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b 2024-12-15T01:33:44,361 DEBUG [RS:2;66d575e28c61:39381 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:33329 2024-12-15T01:33:44,361 DEBUG [RS:2;66d575e28c61:39381 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-15T01:33:44,364 DEBUG [RS:0;66d575e28c61:42663 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b 2024-12-15T01:33:44,364 
DEBUG [RS:0;66d575e28c61:42663 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:33329 2024-12-15T01:33:44,365 DEBUG [RS:0;66d575e28c61:42663 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-15T01:33:44,369 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-15T01:33:44,404 DEBUG [RS:2;66d575e28c61:39381 {}] zookeeper.ZKUtil(111): regionserver:39381-0x1002752410c0003, quorum=127.0.0.1:64248, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/66d575e28c61,39381,1734226422872 2024-12-15T01:33:44,404 WARN [RS:2;66d575e28c61:39381 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-15T01:33:44,404 DEBUG [RS:1;66d575e28c61:35281 {}] zookeeper.ZKUtil(111): regionserver:35281-0x1002752410c0002, quorum=127.0.0.1:64248, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/66d575e28c61,35281,1734226422799 2024-12-15T01:33:44,404 INFO [RS:2;66d575e28c61:39381 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-15T01:33:44,404 WARN [RS:1;66d575e28c61:35281 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-15T01:33:44,405 INFO [RS:1;66d575e28c61:35281 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-15T01:33:44,405 DEBUG [RS:2;66d575e28c61:39381 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/WALs/66d575e28c61,39381,1734226422872 2024-12-15T01:33:44,405 DEBUG [RS:0;66d575e28c61:42663 {}] zookeeper.ZKUtil(111): regionserver:42663-0x1002752410c0001, quorum=127.0.0.1:64248, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/66d575e28c61,42663,1734226422715 2024-12-15T01:33:44,405 DEBUG [RS:1;66d575e28c61:35281 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/WALs/66d575e28c61,35281,1734226422799 2024-12-15T01:33:44,405 WARN [RS:0;66d575e28c61:42663 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
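[editor's note] The ZKWatcher/ZKUtil lines above show the master watching /hbase/rs for regionserver ephemeral nodes and receiving NodeChildrenChanged events as each server registers. The sketch below reproduces that watch pattern with the bare ZooKeeper client (quorum address copied from the log); it is not the HBase ZKWatcher class, just the underlying API it builds on.

    import java.util.List;
    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class RsWatchSketch {
        public static void main(String[] args) throws Exception {
            CountDownLatch connected = new CountDownLatch(1);
            ZooKeeper zk = new ZooKeeper("127.0.0.1:64248", 30_000, event -> {
                if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
                    connected.countDown();
                }
                if (event.getType() == Watcher.Event.EventType.NodeChildrenChanged) {
                    // Fires when a regionserver znode appears or disappears under /hbase/rs.
                    System.out.println("children changed under " + event.getPath());
                }
            });
            connected.await();
            List<String> servers = zk.getChildren("/hbase/rs", true); // true re-arms the default watcher
            System.out.println("live regionservers: " + servers);
            zk.close();
        }
    }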
2024-12-15T01:33:44,405 INFO [RS:0;66d575e28c61:42663 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-15T01:33:44,405 DEBUG [RS:0;66d575e28c61:42663 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/WALs/66d575e28c61,42663,1734226422715 2024-12-15T01:33:44,406 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [66d575e28c61,35281,1734226422799] 2024-12-15T01:33:44,406 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [66d575e28c61,42663,1734226422715] 2024-12-15T01:33:44,406 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [66d575e28c61,39381,1734226422872] 2024-12-15T01:33:44,419 DEBUG [RS:0;66d575e28c61:42663 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-15T01:33:44,419 DEBUG [RS:1;66d575e28c61:35281 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-15T01:33:44,419 DEBUG [RS:2;66d575e28c61:39381 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-15T01:33:44,430 INFO [RS:2;66d575e28c61:39381 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-15T01:33:44,430 INFO [RS:1;66d575e28c61:35281 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-15T01:33:44,430 INFO [RS:0;66d575e28c61:42663 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-15T01:33:44,460 INFO [RS:0;66d575e28c61:42663 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-15T01:33:44,463 INFO [RS:2;66d575e28c61:39381 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-15T01:33:44,463 INFO [RS:1;66d575e28c61:35281 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-15T01:33:44,467 INFO [RS:1;66d575e28c61:35281 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-15T01:33:44,467 INFO [RS:1;66d575e28c61:35281 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-15T01:33:44,468 INFO [RS:2;66d575e28c61:39381 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-15T01:33:44,469 INFO [RS:2;66d575e28c61:39381 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
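[editor's note] The MemStoreFlusher numbers above (globalMemStoreLimit=880 M, low mark=836 M) are consistent with the usual default fractions of 0.4 of heap for the global limit and 0.95 of that for the low-water mark, which would put this test JVM's heap at roughly 2.2 GB. Those fractions are an assumption about the defaults in effect, not something the log states; the arithmetic is spelled out below.

    public class MemstoreLimitSketch {
        public static void main(String[] args) {
            // ASSUMPTION: default fractions 0.4 (global limit) and 0.95 (low-water mark).
            double heapMb = 880 / 0.4;               // ~2200 MB of heap implied by the log
            double globalLimitMb = heapMb * 0.4;     // 880 MB, matches globalMemStoreLimit
            double lowMarkMb = globalLimitMb * 0.95; // 836 MB, matches globalMemStoreLimitLowMark
            System.out.printf("heap=%.0f MB, limit=%.0f MB, lowMark=%.0f MB%n",
                    heapMb, globalLimitMb, lowMarkMb);
        }
    }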
2024-12-15T01:33:44,474 INFO [RS:0;66d575e28c61:42663 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-15T01:33:44,474 INFO [RS:0;66d575e28c61:42663 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-15T01:33:44,476 INFO [RS:0;66d575e28c61:42663 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-15T01:33:44,487 INFO [RS:0;66d575e28c61:42663 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-15T01:33:44,487 DEBUG [RS:0;66d575e28c61:42663 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/66d575e28c61:0, corePoolSize=1, maxPoolSize=1 2024-12-15T01:33:44,487 DEBUG [RS:0;66d575e28c61:42663 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/66d575e28c61:0, corePoolSize=1, maxPoolSize=1 2024-12-15T01:33:44,487 DEBUG [RS:0;66d575e28c61:42663 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/66d575e28c61:0, corePoolSize=1, maxPoolSize=1 2024-12-15T01:33:44,488 DEBUG [RS:0;66d575e28c61:42663 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/66d575e28c61:0, corePoolSize=1, maxPoolSize=1 2024-12-15T01:33:44,488 DEBUG [RS:0;66d575e28c61:42663 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/66d575e28c61:0, corePoolSize=1, maxPoolSize=1 2024-12-15T01:33:44,488 DEBUG [RS:0;66d575e28c61:42663 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/66d575e28c61:0, corePoolSize=2, maxPoolSize=2 2024-12-15T01:33:44,488 DEBUG [RS:0;66d575e28c61:42663 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/66d575e28c61:0, corePoolSize=1, maxPoolSize=1 2024-12-15T01:33:44,488 DEBUG [RS:0;66d575e28c61:42663 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/66d575e28c61:0, corePoolSize=1, maxPoolSize=1 2024-12-15T01:33:44,488 DEBUG [RS:0;66d575e28c61:42663 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/66d575e28c61:0, corePoolSize=1, maxPoolSize=1 2024-12-15T01:33:44,488 DEBUG [RS:0;66d575e28c61:42663 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/66d575e28c61:0, corePoolSize=1, maxPoolSize=1 2024-12-15T01:33:44,488 DEBUG [RS:0;66d575e28c61:42663 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/66d575e28c61:0, corePoolSize=1, maxPoolSize=1 2024-12-15T01:33:44,488 DEBUG [RS:0;66d575e28c61:42663 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0, corePoolSize=3, maxPoolSize=3 2024-12-15T01:33:44,488 DEBUG [RS:0;66d575e28c61:42663 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/66d575e28c61:0, corePoolSize=3, maxPoolSize=3 2024-12-15T01:33:44,491 INFO [RS:2;66d575e28c61:39381 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-15T01:33:44,492 WARN [66d575e28c61:40789 {}] 
assignment.AssignmentManager(2423): No servers available; cannot place 1 unassigned regions. 2024-12-15T01:33:44,493 INFO [RS:2;66d575e28c61:39381 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-15T01:33:44,494 DEBUG [RS:2;66d575e28c61:39381 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/66d575e28c61:0, corePoolSize=1, maxPoolSize=1 2024-12-15T01:33:44,494 DEBUG [RS:2;66d575e28c61:39381 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/66d575e28c61:0, corePoolSize=1, maxPoolSize=1 2024-12-15T01:33:44,494 DEBUG [RS:2;66d575e28c61:39381 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/66d575e28c61:0, corePoolSize=1, maxPoolSize=1 2024-12-15T01:33:44,494 DEBUG [RS:2;66d575e28c61:39381 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/66d575e28c61:0, corePoolSize=1, maxPoolSize=1 2024-12-15T01:33:44,494 DEBUG [RS:2;66d575e28c61:39381 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/66d575e28c61:0, corePoolSize=1, maxPoolSize=1 2024-12-15T01:33:44,494 DEBUG [RS:2;66d575e28c61:39381 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/66d575e28c61:0, corePoolSize=2, maxPoolSize=2 2024-12-15T01:33:44,494 DEBUG [RS:2;66d575e28c61:39381 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/66d575e28c61:0, corePoolSize=1, maxPoolSize=1 2024-12-15T01:33:44,494 DEBUG [RS:2;66d575e28c61:39381 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/66d575e28c61:0, corePoolSize=1, maxPoolSize=1 2024-12-15T01:33:44,495 DEBUG [RS:2;66d575e28c61:39381 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/66d575e28c61:0, corePoolSize=1, maxPoolSize=1 2024-12-15T01:33:44,495 DEBUG [RS:2;66d575e28c61:39381 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/66d575e28c61:0, corePoolSize=1, maxPoolSize=1 2024-12-15T01:33:44,495 DEBUG [RS:2;66d575e28c61:39381 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/66d575e28c61:0, corePoolSize=1, maxPoolSize=1 2024-12-15T01:33:44,495 DEBUG [RS:2;66d575e28c61:39381 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0, corePoolSize=3, maxPoolSize=3 2024-12-15T01:33:44,495 DEBUG [RS:2;66d575e28c61:39381 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/66d575e28c61:0, corePoolSize=3, maxPoolSize=3 2024-12-15T01:33:44,499 INFO [RS:1;66d575e28c61:35281 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-15T01:33:44,502 INFO [RS:1;66d575e28c61:35281 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-15T01:33:44,502 DEBUG [RS:1;66d575e28c61:35281 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/66d575e28c61:0, corePoolSize=1, maxPoolSize=1 2024-12-15T01:33:44,502 DEBUG [RS:1;66d575e28c61:35281 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/66d575e28c61:0, corePoolSize=1, maxPoolSize=1 2024-12-15T01:33:44,503 DEBUG [RS:1;66d575e28c61:35281 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/66d575e28c61:0, corePoolSize=1, maxPoolSize=1 2024-12-15T01:33:44,503 DEBUG [RS:1;66d575e28c61:35281 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/66d575e28c61:0, corePoolSize=1, maxPoolSize=1 2024-12-15T01:33:44,503 DEBUG [RS:1;66d575e28c61:35281 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/66d575e28c61:0, corePoolSize=1, maxPoolSize=1 2024-12-15T01:33:44,503 DEBUG [RS:1;66d575e28c61:35281 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/66d575e28c61:0, corePoolSize=2, maxPoolSize=2 2024-12-15T01:33:44,503 DEBUG [RS:1;66d575e28c61:35281 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/66d575e28c61:0, corePoolSize=1, maxPoolSize=1 2024-12-15T01:33:44,503 DEBUG [RS:1;66d575e28c61:35281 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/66d575e28c61:0, corePoolSize=1, maxPoolSize=1 2024-12-15T01:33:44,503 DEBUG [RS:1;66d575e28c61:35281 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/66d575e28c61:0, corePoolSize=1, maxPoolSize=1 2024-12-15T01:33:44,503 DEBUG [RS:1;66d575e28c61:35281 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/66d575e28c61:0, corePoolSize=1, maxPoolSize=1 2024-12-15T01:33:44,504 DEBUG [RS:1;66d575e28c61:35281 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/66d575e28c61:0, corePoolSize=1, maxPoolSize=1 2024-12-15T01:33:44,504 DEBUG [RS:1;66d575e28c61:35281 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0, corePoolSize=3, maxPoolSize=3 2024-12-15T01:33:44,504 DEBUG [RS:1;66d575e28c61:35281 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/66d575e28c61:0, corePoolSize=3, maxPoolSize=3 2024-12-15T01:33:44,523 INFO [RS:0;66d575e28c61:42663 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-15T01:33:44,523 INFO [RS:0;66d575e28c61:42663 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-15T01:33:44,524 INFO [RS:0;66d575e28c61:42663 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-15T01:33:44,524 INFO [RS:0;66d575e28c61:42663 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-15T01:33:44,524 INFO [RS:0;66d575e28c61:42663 {}] hbase.ChoreService(168): Chore ScheduledChore name=66d575e28c61,42663,1734226422715-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
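[editor's note] Every executor.ExecutorService line above, both the MASTER_* pools earlier and the RS_* pools here, reports corePoolSize equal to maxPoolSize: one fixed-size pool per event type. As a rough JDK analogy only (not the HBase executor class), such a pool looks like this; the sizes are copied from RS_SNAPSHOT_OPERATIONS above.

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class FixedPoolSketch {
        public static void main(String[] args) {
            // corePoolSize=3, maxPoolSize=3: the pool never grows past three workers,
            // so extra tasks simply wait in the queue instead of spawning threads.
            ThreadPoolExecutor pool = new ThreadPoolExecutor(
                    3, 3, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
            pool.allowCoreThreadTimeOut(true); // optional: let idle workers exit after 60 s

            pool.execute(() -> System.out.println("snapshot subtask"));
            pool.shutdown();
        }
    }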
2024-12-15T01:33:44,527 INFO [RS:2;66d575e28c61:39381 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-15T01:33:44,528 INFO [RS:2;66d575e28c61:39381 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-15T01:33:44,528 INFO [RS:2;66d575e28c61:39381 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-15T01:33:44,528 INFO [RS:2;66d575e28c61:39381 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-15T01:33:44,528 INFO [RS:2;66d575e28c61:39381 {}] hbase.ChoreService(168): Chore ScheduledChore name=66d575e28c61,39381,1734226422872-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-15T01:33:44,528 INFO [RS:1;66d575e28c61:35281 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-15T01:33:44,528 INFO [RS:1;66d575e28c61:35281 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-15T01:33:44,528 INFO [RS:1;66d575e28c61:35281 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-15T01:33:44,528 INFO [RS:1;66d575e28c61:35281 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-15T01:33:44,528 INFO [RS:1;66d575e28c61:35281 {}] hbase.ChoreService(168): Chore ScheduledChore name=66d575e28c61,35281,1734226422799-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-15T01:33:44,549 INFO [RS:1;66d575e28c61:35281 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-15T01:33:44,549 INFO [RS:0;66d575e28c61:42663 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-15T01:33:44,551 INFO [RS:1;66d575e28c61:35281 {}] hbase.ChoreService(168): Chore ScheduledChore name=66d575e28c61,35281,1734226422799-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-15T01:33:44,551 INFO [RS:0;66d575e28c61:42663 {}] hbase.ChoreService(168): Chore ScheduledChore name=66d575e28c61,42663,1734226422715-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-15T01:33:44,553 INFO [RS:2;66d575e28c61:39381 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-15T01:33:44,553 INFO [RS:2;66d575e28c61:39381 {}] hbase.ChoreService(168): Chore ScheduledChore name=66d575e28c61,39381,1734226422872-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 
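[editor's note] Each "Chore ScheduledChore ... is enabled" line above registers a periodic task with a fixed period: CompactionChecker and MemstoreFlusherChore every 1000 ms, BrokenStoreFileCleaner every 21600000 ms (6 hours), and so on. The JDK sketch below shows the same fixed-period scheduling idea; it is an analogy, not the HBase ChoreService API.

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class ChoreSketch {
        public static void main(String[] args) throws InterruptedException {
            ScheduledExecutorService chores = Executors.newScheduledThreadPool(1);
            chores.scheduleAtFixedRate(
                    () -> System.out.println("CompactionChecker tick"),
                    0, 1_000, TimeUnit.MILLISECONDS);      // period=1000 ms, as in the log
            chores.scheduleAtFixedRate(
                    () -> System.out.println("BrokenStoreFileCleaner tick"),
                    0, 21_600_000, TimeUnit.MILLISECONDS); // period=21600000 ms (6 hours)
            Thread.sleep(3_000); // let a few CompactionChecker ticks fire, then stop
            chores.shutdownNow();
        }
    }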
2024-12-15T01:33:44,578 INFO [RS:0;66d575e28c61:42663 {}] regionserver.Replication(204): 66d575e28c61,42663,1734226422715 started 2024-12-15T01:33:44,578 INFO [RS:0;66d575e28c61:42663 {}] regionserver.HRegionServer(1767): Serving as 66d575e28c61,42663,1734226422715, RpcServer on 66d575e28c61/172.17.0.2:42663, sessionid=0x1002752410c0001 2024-12-15T01:33:44,579 DEBUG [RS:0;66d575e28c61:42663 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-15T01:33:44,579 DEBUG [RS:0;66d575e28c61:42663 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 66d575e28c61,42663,1734226422715 2024-12-15T01:33:44,579 INFO [RS:1;66d575e28c61:35281 {}] regionserver.Replication(204): 66d575e28c61,35281,1734226422799 started 2024-12-15T01:33:44,579 DEBUG [RS:0;66d575e28c61:42663 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '66d575e28c61,42663,1734226422715' 2024-12-15T01:33:44,579 DEBUG [RS:0;66d575e28c61:42663 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-15T01:33:44,579 INFO [RS:1;66d575e28c61:35281 {}] regionserver.HRegionServer(1767): Serving as 66d575e28c61,35281,1734226422799, RpcServer on 66d575e28c61/172.17.0.2:35281, sessionid=0x1002752410c0002 2024-12-15T01:33:44,580 DEBUG [RS:1;66d575e28c61:35281 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-15T01:33:44,580 DEBUG [RS:1;66d575e28c61:35281 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 66d575e28c61,35281,1734226422799 2024-12-15T01:33:44,580 DEBUG [RS:1;66d575e28c61:35281 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '66d575e28c61,35281,1734226422799' 2024-12-15T01:33:44,580 DEBUG [RS:1;66d575e28c61:35281 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-15T01:33:44,580 DEBUG [RS:0;66d575e28c61:42663 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-15T01:33:44,580 DEBUG [RS:1;66d575e28c61:35281 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-15T01:33:44,581 DEBUG [RS:0;66d575e28c61:42663 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-15T01:33:44,581 DEBUG [RS:0;66d575e28c61:42663 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-15T01:33:44,581 DEBUG [RS:1;66d575e28c61:35281 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-15T01:33:44,581 DEBUG [RS:0;66d575e28c61:42663 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 66d575e28c61,42663,1734226422715 2024-12-15T01:33:44,581 DEBUG [RS:1;66d575e28c61:35281 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-15T01:33:44,581 DEBUG [RS:0;66d575e28c61:42663 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '66d575e28c61,42663,1734226422715' 2024-12-15T01:33:44,581 DEBUG [RS:1;66d575e28c61:35281 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 66d575e28c61,35281,1734226422799 2024-12-15T01:33:44,581 DEBUG [RS:0;66d575e28c61:42663 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 
2024-12-15T01:33:44,581 DEBUG [RS:1;66d575e28c61:35281 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '66d575e28c61,35281,1734226422799' 2024-12-15T01:33:44,581 DEBUG [RS:1;66d575e28c61:35281 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-15T01:33:44,582 DEBUG [RS:1;66d575e28c61:35281 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-15T01:33:44,582 DEBUG [RS:1;66d575e28c61:35281 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-15T01:33:44,583 INFO [RS:1;66d575e28c61:35281 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-15T01:33:44,583 INFO [RS:1;66d575e28c61:35281 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-15T01:33:44,583 INFO [RS:2;66d575e28c61:39381 {}] regionserver.Replication(204): 66d575e28c61,39381,1734226422872 started 2024-12-15T01:33:44,583 INFO [RS:2;66d575e28c61:39381 {}] regionserver.HRegionServer(1767): Serving as 66d575e28c61,39381,1734226422872, RpcServer on 66d575e28c61/172.17.0.2:39381, sessionid=0x1002752410c0003 2024-12-15T01:33:44,583 DEBUG [RS:2;66d575e28c61:39381 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-15T01:33:44,583 DEBUG [RS:2;66d575e28c61:39381 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 66d575e28c61,39381,1734226422872 2024-12-15T01:33:44,583 DEBUG [RS:2;66d575e28c61:39381 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '66d575e28c61,39381,1734226422872' 2024-12-15T01:33:44,583 DEBUG [RS:2;66d575e28c61:39381 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-15T01:33:44,584 DEBUG [RS:2;66d575e28c61:39381 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-15T01:33:44,585 DEBUG [RS:2;66d575e28c61:39381 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-15T01:33:44,585 DEBUG [RS:2;66d575e28c61:39381 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-15T01:33:44,585 DEBUG [RS:2;66d575e28c61:39381 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 66d575e28c61,39381,1734226422872 2024-12-15T01:33:44,585 DEBUG [RS:2;66d575e28c61:39381 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '66d575e28c61,39381,1734226422872' 2024-12-15T01:33:44,585 DEBUG [RS:2;66d575e28c61:39381 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-15T01:33:44,586 DEBUG [RS:2;66d575e28c61:39381 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-15T01:33:44,586 DEBUG [RS:2;66d575e28c61:39381 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-15T01:33:44,586 INFO [RS:2;66d575e28c61:39381 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-15T01:33:44,586 INFO [RS:2;66d575e28c61:39381 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
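[editor's note] The ZKProcedureMemberRpcs lines above describe each regionserver's startup handshake for the flush-table-proc and online-snapshot frameworks: first check the abort znode for in-flight aborts, then list and watch the acquired znode for new procedures. A bare ZooKeeper sketch of those two reads follows (znode paths copied from the log; the method and class names are made up, and it assumes an already-connected handle like the one opened in the earlier sketch).

    import java.util.List;
    import org.apache.zookeeper.KeeperException;
    import org.apache.zookeeper.ZooKeeper;

    public class ProcedureMemberZkSketch {
        /** Startup check for one procedure type, e.g. "flush-table-proc". */
        static List<String> checkAndWatch(ZooKeeper zk, String procType)
                throws KeeperException, InterruptedException {
            String base = "/hbase/" + procType;

            // "Checking for aborted procedures on node: '<base>/abort'"
            // (the real code also inspects any children under this znode)
            boolean abortNodePresent = zk.exists(base + "/abort", false) != null;
            System.out.println(base + "/abort present: " + abortNodePresent);

            // "Looking for new procedures under znode: '<base>/acquired'"
            // true re-arms the default watcher so new procedures trigger an event.
            return zk.getChildren(base + "/acquired", true);
        }
    }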
2024-12-15T01:33:44,592 DEBUG [RS:0;66d575e28c61:42663 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-15T01:33:44,592 DEBUG [RS:0;66d575e28c61:42663 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-15T01:33:44,592 INFO [RS:0;66d575e28c61:42663 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-15T01:33:44,593 INFO [RS:0;66d575e28c61:42663 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-15T01:33:44,689 INFO [RS:2;66d575e28c61:39381 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-15T01:33:44,689 INFO [RS:1;66d575e28c61:35281 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-15T01:33:44,692 INFO [RS:1;66d575e28c61:35281 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=66d575e28c61%2C35281%2C1734226422799, suffix=, logDir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/WALs/66d575e28c61,35281,1734226422799, archiveDir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/oldWALs, maxLogs=32 2024-12-15T01:33:44,692 INFO [RS:2;66d575e28c61:39381 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=66d575e28c61%2C39381%2C1734226422872, suffix=, logDir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/WALs/66d575e28c61,39381,1734226422872, archiveDir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/oldWALs, maxLogs=32 2024-12-15T01:33:44,693 INFO [RS:0;66d575e28c61:42663 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-15T01:33:44,698 INFO [RS:0;66d575e28c61:42663 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=66d575e28c61%2C42663%2C1734226422715, suffix=, logDir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/WALs/66d575e28c61,42663,1734226422715, archiveDir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/oldWALs, maxLogs=32 2024-12-15T01:33:44,711 DEBUG [RS:1;66d575e28c61:35281 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/WALs/66d575e28c61,35281,1734226422799/66d575e28c61%2C35281%2C1734226422799.1734226424697, exclude list is [], retry=0 2024-12-15T01:33:44,713 DEBUG [RS:2;66d575e28c61:39381 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/WALs/66d575e28c61,39381,1734226422872/66d575e28c61%2C39381%2C1734226422872.1734226424697, exclude list is [], retry=0 2024-12-15T01:33:44,721 DEBUG [RS-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38403,DS-ed77dea0-c2d8-48b0-b5bf-eaf2aed0c4fa,DISK] 2024-12-15T01:33:44,721 DEBUG [RS-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46345,DS-104a2fd6-e7a1-43fc-ac07-1d9598d0600c,DISK] 2024-12-15T01:33:44,721 DEBUG 
[RS:0;66d575e28c61:42663 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/WALs/66d575e28c61,42663,1734226422715/66d575e28c61%2C42663%2C1734226422715.1734226424700, exclude list is [], retry=0 2024-12-15T01:33:44,722 DEBUG [RS-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38403,DS-ed77dea0-c2d8-48b0-b5bf-eaf2aed0c4fa,DISK] 2024-12-15T01:33:44,722 DEBUG [RS-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38745,DS-a619e4b2-7115-404b-8a2a-e1ff218d0fdc,DISK] 2024-12-15T01:33:44,722 DEBUG [RS-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38745,DS-a619e4b2-7115-404b-8a2a-e1ff218d0fdc,DISK] 2024-12-15T01:33:44,723 DEBUG [RS-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46345,DS-104a2fd6-e7a1-43fc-ac07-1d9598d0600c,DISK] 2024-12-15T01:33:44,728 DEBUG [RS-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46345,DS-104a2fd6-e7a1-43fc-ac07-1d9598d0600c,DISK] 2024-12-15T01:33:44,749 DEBUG [RS-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38403,DS-ed77dea0-c2d8-48b0-b5bf-eaf2aed0c4fa,DISK] 2024-12-15T01:33:44,749 DEBUG [RS-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38745,DS-a619e4b2-7115-404b-8a2a-e1ff218d0fdc,DISK] 2024-12-15T01:33:44,756 INFO [RS:2;66d575e28c61:39381 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/WALs/66d575e28c61,39381,1734226422872/66d575e28c61%2C39381%2C1734226422872.1734226424697 2024-12-15T01:33:44,757 DEBUG [RS:2;66d575e28c61:39381 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:33539:33539),(127.0.0.1/127.0.0.1:45149:45149),(127.0.0.1/127.0.0.1:45299:45299)] 2024-12-15T01:33:44,757 INFO [RS:1;66d575e28c61:35281 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/WALs/66d575e28c61,35281,1734226422799/66d575e28c61%2C35281%2C1734226422799.1734226424697 2024-12-15T01:33:44,757 INFO [RS:0;66d575e28c61:42663 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/WALs/66d575e28c61,42663,1734226422715/66d575e28c61%2C42663%2C1734226422715.1734226424700 2024-12-15T01:33:44,758 DEBUG [RS:1;66d575e28c61:35281 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: 
[(127.0.0.1/127.0.0.1:45299:45299),(127.0.0.1/127.0.0.1:45149:45149),(127.0.0.1/127.0.0.1:33539:33539)] 2024-12-15T01:33:44,758 DEBUG [RS:0;66d575e28c61:42663 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45149:45149),(127.0.0.1/127.0.0.1:33539:33539),(127.0.0.1/127.0.0.1:45299:45299)] 2024-12-15T01:33:44,994 DEBUG [66d575e28c61:40789 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-15T01:33:44,997 DEBUG [66d575e28c61:40789 {}] balancer.BalancerClusterState(202): Hosts are {66d575e28c61=0} racks are {/default-rack=0} 2024-12-15T01:33:45,002 DEBUG [66d575e28c61:40789 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-15T01:33:45,002 DEBUG [66d575e28c61:40789 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-15T01:33:45,002 DEBUG [66d575e28c61:40789 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-15T01:33:45,003 INFO [66d575e28c61:40789 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-15T01:33:45,003 INFO [66d575e28c61:40789 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-15T01:33:45,003 INFO [66d575e28c61:40789 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-15T01:33:45,003 DEBUG [66d575e28c61:40789 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-15T01:33:45,007 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=66d575e28c61,42663,1734226422715 2024-12-15T01:33:45,012 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 66d575e28c61,42663,1734226422715, state=OPENING 2024-12-15T01:33:45,025 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-15T01:33:45,035 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35281-0x1002752410c0002, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-15T01:33:45,035 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42663-0x1002752410c0001, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-15T01:33:45,035 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39381-0x1002752410c0003, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-15T01:33:45,035 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-15T01:33:45,036 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-15T01:33:45,037 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-15T01:33:45,037 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-15T01:33:45,039 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): 
Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-15T01:33:45,043 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=66d575e28c61,42663,1734226422715}] 2024-12-15T01:33:45,214 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 66d575e28c61,42663,1734226422715 2024-12-15T01:33:45,215 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-15T01:33:45,217 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55702, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-15T01:33:45,229 INFO [RS_OPEN_META-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-12-15T01:33:45,229 INFO [RS_OPEN_META-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-15T01:33:45,229 INFO [RS_OPEN_META-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-15T01:33:45,232 INFO [RS_OPEN_META-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=66d575e28c61%2C42663%2C1734226422715.meta, suffix=.meta, logDir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/WALs/66d575e28c61,42663,1734226422715, archiveDir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/oldWALs, maxLogs=32 2024-12-15T01:33:45,247 DEBUG [RS_OPEN_META-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/WALs/66d575e28c61,42663,1734226422715/66d575e28c61%2C42663%2C1734226422715.meta.1734226425234.meta, exclude list is [], retry=0 2024-12-15T01:33:45,251 DEBUG [RS-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38745,DS-a619e4b2-7115-404b-8a2a-e1ff218d0fdc,DISK] 2024-12-15T01:33:45,251 DEBUG [RS-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38403,DS-ed77dea0-c2d8-48b0-b5bf-eaf2aed0c4fa,DISK] 2024-12-15T01:33:45,251 DEBUG [RS-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46345,DS-104a2fd6-e7a1-43fc-ac07-1d9598d0600c,DISK] 2024-12-15T01:33:45,254 INFO [RS_OPEN_META-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/WALs/66d575e28c61,42663,1734226422715/66d575e28c61%2C42663%2C1734226422715.meta.1734226425234.meta 2024-12-15T01:33:45,254 DEBUG [RS_OPEN_META-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create 
new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45299:45299),(127.0.0.1/127.0.0.1:33539:33539),(127.0.0.1/127.0.0.1:45149:45149)] 2024-12-15T01:33:45,254 DEBUG [RS_OPEN_META-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-15T01:33:45,255 DEBUG [RS_OPEN_META-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=AccessControlService 2024-12-15T01:33:45,256 INFO [RS_OPEN_META-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-15T01:33:45,257 DEBUG [RS_OPEN_META-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-15T01:33:45,258 DEBUG [RS_OPEN_META-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-15T01:33:45,259 INFO [RS_OPEN_META-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-15T01:33:45,267 DEBUG [RS_OPEN_META-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-15T01:33:45,267 DEBUG [RS_OPEN_META-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T01:33:45,267 DEBUG [RS_OPEN_META-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-12-15T01:33:45,267 DEBUG [RS_OPEN_META-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-12-15T01:33:45,270 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-15T01:33:45,272 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 
2024-12-15T01:33:45,272 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T01:33:45,273 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-15T01:33:45,273 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-15T01:33:45,275 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-15T01:33:45,275 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T01:33:45,276 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-15T01:33:45,276 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-15T01:33:45,278 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-15T01:33:45,278 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T01:33:45,278 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-15T01:33:45,280 DEBUG [RS_OPEN_META-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/hbase/meta/1588230740 2024-12-15T01:33:45,283 DEBUG [RS_OPEN_META-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/hbase/meta/1588230740 2024-12-15T01:33:45,286 DEBUG [RS_OPEN_META-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-15T01:33:45,290 DEBUG [RS_OPEN_META-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-15T01:33:45,292 INFO [RS_OPEN_META-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74955936, jitterRate=0.11693048477172852}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-15T01:33:45,295 DEBUG [RS_OPEN_META-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-15T01:33:45,301 INFO [RS_OPEN_META-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1734226425209 2024-12-15T01:33:45,311 DEBUG [RS_OPEN_META-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-15T01:33:45,311 INFO [RS_OPEN_META-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-12-15T01:33:45,312 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=66d575e28c61,42663,1734226422715 2024-12-15T01:33:45,314 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 66d575e28c61,42663,1734226422715, state=OPEN 2024-12-15T01:33:45,325 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-15T01:33:45,325 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42663-0x1002752410c0001, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-15T01:33:45,325 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39381-0x1002752410c0003, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-15T01:33:45,325 DEBUG [Time-limited 
test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35281-0x1002752410c0002, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-15T01:33:45,326 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-15T01:33:45,326 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-15T01:33:45,326 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-15T01:33:45,326 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-15T01:33:45,332 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-12-15T01:33:45,332 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=66d575e28c61,42663,1734226422715 in 283 msec 2024-12-15T01:33:45,340 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-12-15T01:33:45,340 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 1.0020 sec 2024-12-15T01:33:45,348 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 1.1990 sec 2024-12-15T01:33:45,348 INFO [master/66d575e28c61:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1734226425348, completionTime=-1 2024-12-15T01:33:45,348 INFO [master/66d575e28c61:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-15T01:33:45,348 DEBUG [master/66d575e28c61:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 
2024-12-15T01:33:45,387 DEBUG [hconnection-0x613351a8-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T01:33:45,390 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55706, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T01:33:45,408 INFO [master/66d575e28c61:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=3 2024-12-15T01:33:45,408 INFO [master/66d575e28c61:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1734226485408 2024-12-15T01:33:45,408 INFO [master/66d575e28c61:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1734226545408 2024-12-15T01:33:45,408 INFO [master/66d575e28c61:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 59 msec 2024-12-15T01:33:45,445 DEBUG [master/66d575e28c61:0:becomeActiveMaster {}] balancer.RegionLocationFinder(172): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-15T01:33:45,454 INFO [master/66d575e28c61:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=66d575e28c61,40789,1734226421776-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-15T01:33:45,454 INFO [master/66d575e28c61:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=66d575e28c61,40789,1734226421776-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-15T01:33:45,454 INFO [master/66d575e28c61:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=66d575e28c61,40789,1734226421776-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-15T01:33:45,456 INFO [master/66d575e28c61:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-66d575e28c61:40789, period=300000, unit=MILLISECONDS is enabled. 2024-12-15T01:33:45,456 INFO [master/66d575e28c61:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-15T01:33:45,462 DEBUG [master/66d575e28c61:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-12-15T01:33:45,464 INFO [master/66d575e28c61:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 
2024-12-15T01:33:45,465 INFO [master/66d575e28c61:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-15T01:33:45,471 DEBUG [master/66d575e28c61:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-12-15T01:33:45,474 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-12-15T01:33:45,475 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T01:33:45,478 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-15T01:33:45,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741837_1013 (size=358) 2024-12-15T01:33:45,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741837_1013 (size=358) 2024-12-15T01:33:45,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741837_1013 (size=358) 2024-12-15T01:33:45,516 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 874e078b36e6d955b975f9e096ec95f5, NAME => 'hbase:namespace,,1734226425464.874e078b36e6d955b975f9e096ec95f5.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b 2024-12-15T01:33:45,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741838_1014 (size=42) 2024-12-15T01:33:45,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741838_1014 (size=42) 2024-12-15T01:33:45,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741838_1014 (size=42) 2024-12-15T01:33:45,530 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1734226425464.874e078b36e6d955b975f9e096ec95f5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T01:33:45,530 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] 
regionserver.HRegion(1681): Closing 874e078b36e6d955b975f9e096ec95f5, disabling compactions & flushes 2024-12-15T01:33:45,530 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1734226425464.874e078b36e6d955b975f9e096ec95f5. 2024-12-15T01:33:45,530 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1734226425464.874e078b36e6d955b975f9e096ec95f5. 2024-12-15T01:33:45,530 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1734226425464.874e078b36e6d955b975f9e096ec95f5. after waiting 0 ms 2024-12-15T01:33:45,530 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1734226425464.874e078b36e6d955b975f9e096ec95f5. 2024-12-15T01:33:45,530 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1734226425464.874e078b36e6d955b975f9e096ec95f5. 2024-12-15T01:33:45,530 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for 874e078b36e6d955b975f9e096ec95f5: 2024-12-15T01:33:45,533 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-12-15T01:33:45,538 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1734226425464.874e078b36e6d955b975f9e096ec95f5.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1734226425534"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734226425534"}]},"ts":"1734226425534"} 2024-12-15T01:33:45,586 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
2024-12-15T01:33:45,592 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-15T01:33:45,597 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734226425593"}]},"ts":"1734226425593"} 2024-12-15T01:33:45,604 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-12-15T01:33:45,644 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(202): Hosts are {66d575e28c61=0} racks are {/default-rack=0} 2024-12-15T01:33:45,646 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-15T01:33:45,646 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-15T01:33:45,646 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-15T01:33:45,646 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-15T01:33:45,646 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-15T01:33:45,646 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-15T01:33:45,646 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-15T01:33:45,648 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=874e078b36e6d955b975f9e096ec95f5, ASSIGN}] 2024-12-15T01:33:45,652 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=874e078b36e6d955b975f9e096ec95f5, ASSIGN 2024-12-15T01:33:45,658 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=874e078b36e6d955b975f9e096ec95f5, ASSIGN; state=OFFLINE, location=66d575e28c61,39381,1734226422872; forceNewPlan=false, retain=false 2024-12-15T01:33:45,809 INFO [66d575e28c61:40789 {}] balancer.BaseLoadBalancer(546): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-12-15T01:33:45,810 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=874e078b36e6d955b975f9e096ec95f5, regionState=OPENING, regionLocation=66d575e28c61,39381,1734226422872 2024-12-15T01:33:45,813 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure 874e078b36e6d955b975f9e096ec95f5, server=66d575e28c61,39381,1734226422872}] 2024-12-15T01:33:45,967 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 66d575e28c61,39381,1734226422872 2024-12-15T01:33:45,967 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-15T01:33:45,970 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59752, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-15T01:33:45,977 INFO [RS_OPEN_PRIORITY_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open hbase:namespace,,1734226425464.874e078b36e6d955b975f9e096ec95f5. 2024-12-15T01:33:45,978 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => 874e078b36e6d955b975f9e096ec95f5, NAME => 'hbase:namespace,,1734226425464.874e078b36e6d955b975f9e096ec95f5.', STARTKEY => '', ENDKEY => ''} 2024-12-15T01:33:45,978 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:namespace,,1734226425464.874e078b36e6d955b975f9e096ec95f5. service=AccessControlService 2024-12-15T01:33:45,978 INFO [RS_OPEN_PRIORITY_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-15T01:33:45,979 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace 874e078b36e6d955b975f9e096ec95f5 2024-12-15T01:33:45,979 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1734226425464.874e078b36e6d955b975f9e096ec95f5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T01:33:45,979 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for 874e078b36e6d955b975f9e096ec95f5 2024-12-15T01:33:45,979 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for 874e078b36e6d955b975f9e096ec95f5 2024-12-15T01:33:45,981 INFO [StoreOpener-874e078b36e6d955b975f9e096ec95f5-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 874e078b36e6d955b975f9e096ec95f5 2024-12-15T01:33:45,984 INFO [StoreOpener-874e078b36e6d955b975f9e096ec95f5-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 874e078b36e6d955b975f9e096ec95f5 columnFamilyName info 2024-12-15T01:33:45,984 DEBUG [StoreOpener-874e078b36e6d955b975f9e096ec95f5-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T01:33:45,985 INFO [StoreOpener-874e078b36e6d955b975f9e096ec95f5-1 {}] regionserver.HStore(327): Store=874e078b36e6d955b975f9e096ec95f5/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-15T01:33:45,986 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/hbase/namespace/874e078b36e6d955b975f9e096ec95f5 2024-12-15T01:33:45,986 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/hbase/namespace/874e078b36e6d955b975f9e096ec95f5 2024-12-15T01:33:45,990 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, 
pid=6}] regionserver.HRegion(1085): writing seq id for 874e078b36e6d955b975f9e096ec95f5 2024-12-15T01:33:45,996 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/hbase/namespace/874e078b36e6d955b975f9e096ec95f5/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-15T01:33:45,998 INFO [RS_OPEN_PRIORITY_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened 874e078b36e6d955b975f9e096ec95f5; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68832838, jitterRate=0.02568921446800232}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-15T01:33:45,999 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for 874e078b36e6d955b975f9e096ec95f5: 2024-12-15T01:33:46,002 INFO [RS_OPEN_PRIORITY_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1734226425464.874e078b36e6d955b975f9e096ec95f5., pid=6, masterSystemTime=1734226425967 2024-12-15T01:33:46,010 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1734226425464.874e078b36e6d955b975f9e096ec95f5. 2024-12-15T01:33:46,010 INFO [RS_OPEN_PRIORITY_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1734226425464.874e078b36e6d955b975f9e096ec95f5. 
2024-12-15T01:33:46,011 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=874e078b36e6d955b975f9e096ec95f5, regionState=OPEN, openSeqNum=2, regionLocation=66d575e28c61,39381,1734226422872 2024-12-15T01:33:46,019 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-12-15T01:33:46,020 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure 874e078b36e6d955b975f9e096ec95f5, server=66d575e28c61,39381,1734226422872 in 202 msec 2024-12-15T01:33:46,022 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-12-15T01:33:46,022 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=874e078b36e6d955b975f9e096ec95f5, ASSIGN in 371 msec 2024-12-15T01:33:46,023 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-15T01:33:46,023 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734226426023"}]},"ts":"1734226426023"} 2024-12-15T01:33:46,026 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-12-15T01:33:46,037 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-12-15T01:33:46,040 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 572 msec 2024-12-15T01:33:46,074 DEBUG [master/66d575e28c61:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-12-15T01:33:46,084 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42663-0x1002752410c0001, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-15T01:33:46,084 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39381-0x1002752410c0003, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-15T01:33:46,084 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-12-15T01:33:46,084 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35281-0x1002752410c0002, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-15T01:33:46,084 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-15T01:33:46,109 DEBUG [master/66d575e28c61:0:becomeActiveMaster {}] 
ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T01:33:46,111 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59764, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T01:33:46,128 DEBUG [master/66d575e28c61:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-12-15T01:33:46,159 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-15T01:33:46,176 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; CreateNamespaceProcedure, namespace=default in 56 msec 2024-12-15T01:33:46,185 DEBUG [master/66d575e28c61:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-12-15T01:33:46,208 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-15T01:33:46,224 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 38 msec 2024-12-15T01:33:46,277 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-12-15T01:33:46,302 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-12-15T01:33:46,302 INFO [master/66d575e28c61:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 3.316sec 2024-12-15T01:33:46,305 INFO [master/66d575e28c61:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-15T01:33:46,306 INFO [master/66d575e28c61:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-15T01:33:46,308 INFO [master/66d575e28c61:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-15T01:33:46,308 INFO [master/66d575e28c61:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-15T01:33:46,309 INFO [master/66d575e28c61:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-15T01:33:46,310 INFO [master/66d575e28c61:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=66d575e28c61,40789,1734226421776-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-12-15T01:33:46,310 INFO [master/66d575e28c61:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=66d575e28c61,40789,1734226421776-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-15T01:33:46,329 INFO [master/66d575e28c61:0:becomeActiveMaster {}] master.HMaster$4(2389): Client=null/null create 'hbase:acl', {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-15T01:33:46,337 DEBUG [master/66d575e28c61:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:acl 2024-12-15T01:33:46,339 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_PRE_OPERATION 2024-12-15T01:33:46,340 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T01:33:46,342 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-15T01:33:46,343 INFO [master/66d575e28c61:0:becomeActiveMaster {}] master.MasterRpcServices(713): Client=null/null procedure request for creating table: namespace: "hbase" qualifier: "acl" procId is: 9 2024-12-15T01:33:46,347 DEBUG [master/66d575e28c61:0:becomeActiveMaster {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-15T01:33:46,349 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2522ae47 to 127.0.0.1:64248 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3c5c7c2f 2024-12-15T01:33:46,350 WARN [Time-limited test {}] client.ZKConnectionRegistry(90): ZKConnectionRegistry is deprecated. 
See https://hbase.apache.org/book.html#client.rpcconnectionregistry 2024-12-15T01:33:46,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741839_1015 (size=349) 2024-12-15T01:33:46,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741839_1015 (size=349) 2024-12-15T01:33:46,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741839_1015 (size=349) 2024-12-15T01:33:46,374 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => d0386827b59519398c69d2b9e07bf334, NAME => 'hbase:acl,,1734226426324.d0386827b59519398c69d2b9e07bf334.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:acl', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b 2024-12-15T01:33:46,377 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1ec885e7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T01:33:46,382 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-15T01:33:46,382 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-15T01:33:46,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741840_1016 (size=36) 2024-12-15T01:33:46,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741840_1016 (size=36) 2024-12-15T01:33:46,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741840_1016 (size=36) 2024-12-15T01:33:46,396 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:acl,,1734226426324.d0386827b59519398c69d2b9e07bf334.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T01:33:46,396 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1681): Closing d0386827b59519398c69d2b9e07bf334, disabling compactions & flushes 2024-12-15T01:33:46,396 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:acl,,1734226426324.d0386827b59519398c69d2b9e07bf334. 2024-12-15T01:33:46,396 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:acl,,1734226426324.d0386827b59519398c69d2b9e07bf334. 2024-12-15T01:33:46,396 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:acl,,1734226426324.d0386827b59519398c69d2b9e07bf334. 
after waiting 0 ms 2024-12-15T01:33:46,396 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:acl,,1734226426324.d0386827b59519398c69d2b9e07bf334. 2024-12-15T01:33:46,396 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1922): Closed hbase:acl,,1734226426324.d0386827b59519398c69d2b9e07bf334. 2024-12-15T01:33:46,396 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1635): Region close journal for d0386827b59519398c69d2b9e07bf334: 2024-12-15T01:33:46,399 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_ADD_TO_META 2024-12-15T01:33:46,399 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:acl,,1734226426324.d0386827b59519398c69d2b9e07bf334.","families":{"info":[{"qualifier":"regioninfo","vlen":35,"tag":[],"timestamp":"1734226426399"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734226426399"}]},"ts":"1734226426399"} 2024-12-15T01:33:46,403 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-15T01:33:46,404 DEBUG [hconnection-0x6429ffb1-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T01:33:46,405 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-15T01:33:46,406 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:acl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734226426405"}]},"ts":"1734226426405"} 2024-12-15T01:33:46,409 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:acl, state=ENABLING in hbase:meta 2024-12-15T01:33:46,416 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55714, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T01:33:46,419 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=66d575e28c61,40789,1734226421776 2024-12-15T01:33:46,419 INFO [Time-limited test {}] hbase.HBaseTestingUtility(2790): Starting mini mapreduce cluster... 
2024-12-15T01:33:46,419 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/test.cache.data in system properties and HBase conf 2024-12-15T01:33:46,419 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/hadoop.tmp.dir in system properties and HBase conf 2024-12-15T01:33:46,419 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/hadoop.log.dir in system properties and HBase conf 2024-12-15T01:33:46,420 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-15T01:33:46,420 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-15T01:33:46,420 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-12-15T01:33:46,420 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-15T01:33:46,420 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-15T01:33:46,420 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-15T01:33:46,420 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-15T01:33:46,420 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-15T01:33:46,420 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-15T01:33:46,420 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-15T01:33:46,420 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-15T01:33:46,420 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-15T01:33:46,420 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/nfs.dump.dir in system properties and HBase conf 2024-12-15T01:33:46,420 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/java.io.tmpdir in system properties and HBase conf 2024-12-15T01:33:46,420 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-15T01:33:46,420 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-15T01:33:46,420 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-15T01:33:46,436 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(202): Hosts are {66d575e28c61=0} racks are {/default-rack=0} 2024-12-15T01:33:46,437 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-15T01:33:46,437 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-15T01:33:46,437 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-15T01:33:46,437 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-15T01:33:46,437 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-15T01:33:46,437 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): 
server 2 is on rack 0 2024-12-15T01:33:46,437 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-15T01:33:46,438 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:acl, region=d0386827b59519398c69d2b9e07bf334, ASSIGN}] 2024-12-15T01:33:46,440 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:acl, region=d0386827b59519398c69d2b9e07bf334, ASSIGN 2024-12-15T01:33:46,442 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:acl, region=d0386827b59519398c69d2b9e07bf334, ASSIGN; state=OFFLINE, location=66d575e28c61,35281,1734226422799; forceNewPlan=false, retain=false 2024-12-15T01:33:46,449 DEBUG [master/66d575e28c61:0:becomeActiveMaster {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-15T01:33:46,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741841_1017 (size=592039) 2024-12-15T01:33:46,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741841_1017 (size=592039) 2024-12-15T01:33:46,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741841_1017 (size=592039) 2024-12-15T01:33:46,592 INFO [66d575e28c61:40789 {}] balancer.BaseLoadBalancer(546): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-15T01:33:46,592 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=d0386827b59519398c69d2b9e07bf334, regionState=OPENING, regionLocation=66d575e28c61,35281,1734226422799 2024-12-15T01:33:46,596 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure d0386827b59519398c69d2b9e07bf334, server=66d575e28c61,35281,1734226422799}] 2024-12-15T01:33:46,650 DEBUG [master/66d575e28c61:0:becomeActiveMaster {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-15T01:33:46,749 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 66d575e28c61,35281,1734226422799 2024-12-15T01:33:46,749 DEBUG [RSProcedureDispatcher-pool-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-15T01:33:46,751 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33086, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-15T01:33:46,757 INFO [RS_OPEN_PRIORITY_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] handler.AssignRegionHandler(135): Open hbase:acl,,1734226426324.d0386827b59519398c69d2b9e07bf334. 
2024-12-15T01:33:46,757 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => d0386827b59519398c69d2b9e07bf334, NAME => 'hbase:acl,,1734226426324.d0386827b59519398c69d2b9e07bf334.', STARTKEY => '', ENDKEY => ''} 2024-12-15T01:33:46,758 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:acl,,1734226426324.d0386827b59519398c69d2b9e07bf334. service=AccessControlService 2024-12-15T01:33:46,758 INFO [RS_OPEN_PRIORITY_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-15T01:33:46,758 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table acl d0386827b59519398c69d2b9e07bf334 2024-12-15T01:33:46,758 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(894): Instantiated hbase:acl,,1734226426324.d0386827b59519398c69d2b9e07bf334.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T01:33:46,758 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for d0386827b59519398c69d2b9e07bf334 2024-12-15T01:33:46,758 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for d0386827b59519398c69d2b9e07bf334 2024-12-15T01:33:46,761 INFO [StoreOpener-d0386827b59519398c69d2b9e07bf334-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family l of region d0386827b59519398c69d2b9e07bf334 2024-12-15T01:33:46,763 INFO [StoreOpener-d0386827b59519398c69d2b9e07bf334-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d0386827b59519398c69d2b9e07bf334 columnFamilyName l 2024-12-15T01:33:46,763 DEBUG [StoreOpener-d0386827b59519398c69d2b9e07bf334-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T01:33:46,764 INFO [StoreOpener-d0386827b59519398c69d2b9e07bf334-1 {}] regionserver.HStore(327): Store=d0386827b59519398c69d2b9e07bf334/l, memstore type=DefaultMemStore, storagePolicy=NONE, 
verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-15T01:33:46,766 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/hbase/acl/d0386827b59519398c69d2b9e07bf334 2024-12-15T01:33:46,766 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/hbase/acl/d0386827b59519398c69d2b9e07bf334 2024-12-15T01:33:46,770 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for d0386827b59519398c69d2b9e07bf334 2024-12-15T01:33:46,773 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/hbase/acl/d0386827b59519398c69d2b9e07bf334/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-15T01:33:46,774 INFO [RS_OPEN_PRIORITY_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(1102): Opened d0386827b59519398c69d2b9e07bf334; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71856634, jitterRate=0.07074728608131409}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-15T01:33:46,775 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for d0386827b59519398c69d2b9e07bf334: 2024-12-15T01:33:46,777 INFO [RS_OPEN_PRIORITY_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:acl,,1734226426324.d0386827b59519398c69d2b9e07bf334., pid=11, masterSystemTime=1734226426749 2024-12-15T01:33:46,779 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:acl,,1734226426324.d0386827b59519398c69d2b9e07bf334. 2024-12-15T01:33:46,780 INFO [RS_OPEN_PRIORITY_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] handler.AssignRegionHandler(164): Opened hbase:acl,,1734226426324.d0386827b59519398c69d2b9e07bf334. 
2024-12-15T01:33:46,781 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=d0386827b59519398c69d2b9e07bf334, regionState=OPEN, openSeqNum=2, regionLocation=66d575e28c61,35281,1734226422799 2024-12-15T01:33:46,787 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-12-15T01:33:46,789 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure d0386827b59519398c69d2b9e07bf334, server=66d575e28c61,35281,1734226422799 in 189 msec 2024-12-15T01:33:46,791 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-12-15T01:33:46,791 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=hbase:acl, region=d0386827b59519398c69d2b9e07bf334, ASSIGN in 350 msec 2024-12-15T01:33:46,793 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-15T01:33:46,793 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:acl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734226426793"}]},"ts":"1734226426793"} 2024-12-15T01:33:46,796 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:acl, state=ENABLED in hbase:meta 2024-12-15T01:33:46,804 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_POST_OPERATION 2024-12-15T01:33:46,806 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=hbase:acl in 475 msec 2024-12-15T01:33:46,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741842_1018 (size=1663647) 2024-12-15T01:33:46,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741842_1018 (size=1663647) 2024-12-15T01:33:46,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741842_1018 (size=1663647) 2024-12-15T01:33:46,950 DEBUG [master/66d575e28c61:0:becomeActiveMaster {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-15T01:33:46,950 INFO [master/66d575e28c61:0:becomeActiveMaster {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: hbase:acl, procId: 9 completed 2024-12-15T01:33:46,956 DEBUG [master/66d575e28c61:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-12-15T01:33:46,957 INFO [master/66d575e28c61:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-15T01:33:46,958 INFO [master/66d575e28c61:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=66d575e28c61,40789,1734226421776-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-15T01:33:48,531 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-15T01:33:48,638 WARN [Thread-398 {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-15T01:33:48,883 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-12-15T01:33:48,884 INFO [Thread-398 {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-15T01:33:48,884 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-15T01:33:48,886 INFO [Thread-398 {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-15T01:33:48,886 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-15T01:33:48,886 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-15T01:33:48,886 INFO [Thread-398 {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-15T01:33:48,886 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-15T01:33:48,886 INFO [Thread-398 {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-15T01:33:48,886 INFO [Thread-398 {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1b757255{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/hadoop.log.dir/,AVAILABLE} 2024-12-15T01:33:48,887 INFO [Thread-398 {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@653547ab{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-15T01:33:48,888 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-15T01:33:48,900 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@17958c7a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/hadoop.log.dir/,AVAILABLE} 2024-12-15T01:33:48,901 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7867c0c8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-15T01:33:49,023 INFO [Thread-398 {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.resourcemanager.webapp.JAXBContextResolver as a provider class 2024-12-15T01:33:49,023 INFO [Thread-398 {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServices as a root resource class 2024-12-15T01:33:49,023 INFO [Thread-398 {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-12-15T01:33:49,025 INFO [Thread-398 {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-12-15T01:33:49,074 INFO [Thread-398 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.resourcemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-15T01:33:49,475 INFO [Thread-398 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-15T01:33:49,915 INFO [Thread-398 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-15T01:33:49,945 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@22f35f84{cluster,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/java.io.tmpdir/jetty-localhost-41937-hadoop-yarn-common-3_4_1_jar-_-any-14688223666916978546/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/cluster} 2024-12-15T01:33:49,945 INFO [Thread-398 {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4b0f89f9{jobhistory,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/java.io.tmpdir/jetty-localhost-43089-hadoop-yarn-common-3_4_1_jar-_-any-309599822660140920/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/jobhistory} 2024-12-15T01:33:49,946 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6d1a2166{HTTP/1.1, (http/1.1)}{localhost:41937} 2024-12-15T01:33:49,946 INFO [Thread-398 {}] server.AbstractConnector(333): Started ServerConnector@6bc50c27{HTTP/1.1, 
(http/1.1)}{localhost:43089} 2024-12-15T01:33:49,946 INFO [Time-limited test {}] server.Server(415): Started @15598ms 2024-12-15T01:33:49,946 INFO [Thread-398 {}] server.Server(415): Started @15598ms 2024-12-15T01:33:50,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741843_1019 (size=5) 2024-12-15T01:33:50,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741843_1019 (size=5) 2024-12-15T01:33:50,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741843_1019 (size=5) 2024-12-15T01:33:50,783 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-15T01:33:50,873 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-12-15T01:33:50,875 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:acl' 2024-12-15T01:33:50,876 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-15T01:33:51,279 WARN [Time-limited test {}] tracker.NMLogAggregationStatusTracker(95): Log Aggregation is disabled.So is the LogAggregationStatusTracker. 2024-12-15T01:33:51,286 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-15T01:33:51,350 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-12-15T01:33:51,351 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-15T01:33:51,359 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-15T01:33:51,359 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-15T01:33:51,359 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-15T01:33:51,364 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-15T01:33:51,366 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@44fd238{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/hadoop.log.dir/,AVAILABLE} 2024-12-15T01:33:51,367 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@30a31478{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-15T01:33:51,426 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices as a root resource class 2024-12-15T01:33:51,426 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-12-15T01:33:51,426 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver as a provider class 2024-12-15T01:33:51,426 INFO [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-12-15T01:33:51,439 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-15T01:33:51,461 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-15T01:33:51,591 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-15T01:33:51,612 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@595d61ca{node,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/java.io.tmpdir/jetty-localhost-39735-hadoop-yarn-common-3_4_1_jar-_-any-14931801847434808842/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-15T01:33:51,612 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7b09f030{HTTP/1.1, (http/1.1)}{localhost:39735} 2024-12-15T01:33:51,613 INFO [Time-limited test {}] server.Server(415): Started @17265ms 2024-12-15T01:33:51,830 WARN [Time-limited test {}] tracker.NMLogAggregationStatusTracker(95): Log Aggregation is disabled.So is the LogAggregationStatusTracker. 2024-12-15T01:33:51,834 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-15T01:33:51,851 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. 
This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-12-15T01:33:51,852 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-15T01:33:51,859 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-15T01:33:51,859 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-15T01:33:51,860 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-15T01:33:51,861 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-15T01:33:51,862 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1da2c307{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/hadoop.log.dir/,AVAILABLE} 2024-12-15T01:33:51,862 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2876a3a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-15T01:33:51,919 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices as a root resource class 2024-12-15T01:33:51,919 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-12-15T01:33:51,920 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver as a provider class 2024-12-15T01:33:51,920 INFO [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-12-15T01:33:51,933 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-15T01:33:51,939 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-15T01:33:52,052 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-15T01:33:52,058 INFO [Time-limited test {}] handler.ContextHandler(921): Started 
o.e.j.w.WebAppContext@66a81bd4{node,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/java.io.tmpdir/jetty-localhost-40553-hadoop-yarn-common-3_4_1_jar-_-any-3862269757148584610/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-15T01:33:52,059 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@72ae2683{HTTP/1.1, (http/1.1)}{localhost:40553} 2024-12-15T01:33:52,059 INFO [Time-limited test {}] server.Server(415): Started @17712ms 2024-12-15T01:33:52,096 INFO [Time-limited test {}] hbase.HBaseTestingUtility(2825): Mini mapreduce cluster started 2024-12-15T01:33:52,097 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [30,000] milli-secs(wait.for.ratio=[1]) 2024-12-15T01:33:52,121 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportWithTargetName Thread=724, OpenFileDescriptor=783, MaxFileDescriptor=1048576, SystemLoadAverage=422, ProcessCount=11, AvailableMemoryMB=5961 2024-12-15T01:33:52,121 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=724 is superior to 500 2024-12-15T01:33:52,131 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-15T01:33:52,133 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41094, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-15T01:33:52,138 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-15T01:33:52,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportWithTargetName 2024-12-15T01:33:52,142 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=12, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_PRE_OPERATION 2024-12-15T01:33:52,142 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithTargetName" procId is: 12 2024-12-15T01:33:52,143 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T01:33:52,145 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=12, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-15T01:33:52,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] 
master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-15T01:33:52,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741844_1020 (size=406) 2024-12-15T01:33:52,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741844_1020 (size=406) 2024-12-15T01:33:52,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741844_1020 (size=406) 2024-12-15T01:33:52,165 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 4543aa4c02dba9f2c58b479a2039b7ca, NAME => 'testtb-testExportWithTargetName,,1734226432138.4543aa4c02dba9f2c58b479a2039b7ca.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b 2024-12-15T01:33:52,165 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => 23b0667c1da493111bed50bdc7d62bf8, NAME => 'testtb-testExportWithTargetName,1,1734226432138.23b0667c1da493111bed50bdc7d62bf8.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b 2024-12-15T01:33:52,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741845_1021 (size=67) 2024-12-15T01:33:52,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741845_1021 (size=67) 2024-12-15T01:33:52,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741845_1021 (size=67) 2024-12-15T01:33:52,178 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportWithTargetName,,1734226432138.4543aa4c02dba9f2c58b479a2039b7ca.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T01:33:52,178 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1681): Closing 4543aa4c02dba9f2c58b479a2039b7ca, disabling compactions & flushes 2024-12-15T01:33:52,178 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1703): Closing region 
testtb-testExportWithTargetName,,1734226432138.4543aa4c02dba9f2c58b479a2039b7ca. 2024-12-15T01:33:52,178 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithTargetName,,1734226432138.4543aa4c02dba9f2c58b479a2039b7ca. 2024-12-15T01:33:52,178 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithTargetName,,1734226432138.4543aa4c02dba9f2c58b479a2039b7ca. after waiting 0 ms 2024-12-15T01:33:52,178 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithTargetName,,1734226432138.4543aa4c02dba9f2c58b479a2039b7ca. 2024-12-15T01:33:52,178 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportWithTargetName,,1734226432138.4543aa4c02dba9f2c58b479a2039b7ca. 2024-12-15T01:33:52,178 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1635): Region close journal for 4543aa4c02dba9f2c58b479a2039b7ca: 2024-12-15T01:33:52,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741846_1022 (size=67) 2024-12-15T01:33:52,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741846_1022 (size=67) 2024-12-15T01:33:52,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741846_1022 (size=67) 2024-12-15T01:33:52,184 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportWithTargetName,1,1734226432138.23b0667c1da493111bed50bdc7d62bf8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T01:33:52,184 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1681): Closing 23b0667c1da493111bed50bdc7d62bf8, disabling compactions & flushes 2024-12-15T01:33:52,184 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportWithTargetName,1,1734226432138.23b0667c1da493111bed50bdc7d62bf8. 2024-12-15T01:33:52,184 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithTargetName,1,1734226432138.23b0667c1da493111bed50bdc7d62bf8. 2024-12-15T01:33:52,184 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithTargetName,1,1734226432138.23b0667c1da493111bed50bdc7d62bf8. after waiting 0 ms 2024-12-15T01:33:52,184 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithTargetName,1,1734226432138.23b0667c1da493111bed50bdc7d62bf8. 2024-12-15T01:33:52,184 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportWithTargetName,1,1734226432138.23b0667c1da493111bed50bdc7d62bf8. 
2024-12-15T01:33:52,184 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1635): Region close journal for 23b0667c1da493111bed50bdc7d62bf8: 2024-12-15T01:33:52,186 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=12, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_ADD_TO_META 2024-12-15T01:33:52,187 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportWithTargetName,,1734226432138.4543aa4c02dba9f2c58b479a2039b7ca.","families":{"info":[{"qualifier":"regioninfo","vlen":66,"tag":[],"timestamp":"1734226432186"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734226432186"}]},"ts":"1734226432186"} 2024-12-15T01:33:52,187 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportWithTargetName,1,1734226432138.23b0667c1da493111bed50bdc7d62bf8.","families":{"info":[{"qualifier":"regioninfo","vlen":66,"tag":[],"timestamp":"1734226432186"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734226432186"}]},"ts":"1734226432186"} 2024-12-15T01:33:52,224 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-15T01:33:52,226 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=12, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-15T01:33:52,227 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734226432226"}]},"ts":"1734226432226"} 2024-12-15T01:33:52,234 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithTargetName, state=ENABLING in hbase:meta 2024-12-15T01:33:52,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-15T01:33:52,252 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(202): Hosts are {66d575e28c61=0} racks are {/default-rack=0} 2024-12-15T01:33:52,254 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-15T01:33:52,254 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-15T01:33:52,254 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-15T01:33:52,254 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-15T01:33:52,254 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-15T01:33:52,254 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-15T01:33:52,254 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-15T01:33:52,255 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=4543aa4c02dba9f2c58b479a2039b7ca, ASSIGN}, {pid=14, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=23b0667c1da493111bed50bdc7d62bf8, ASSIGN}] 2024-12-15T01:33:52,257 INFO [PEWorker-5 {}] 
procedure.MasterProcedureScheduler(786): Took xlock for pid=14, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=23b0667c1da493111bed50bdc7d62bf8, ASSIGN 2024-12-15T01:33:52,257 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=13, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=4543aa4c02dba9f2c58b479a2039b7ca, ASSIGN 2024-12-15T01:33:52,258 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=13, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=4543aa4c02dba9f2c58b479a2039b7ca, ASSIGN; state=OFFLINE, location=66d575e28c61,39381,1734226422872; forceNewPlan=false, retain=false 2024-12-15T01:33:52,258 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=14, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=23b0667c1da493111bed50bdc7d62bf8, ASSIGN; state=OFFLINE, location=66d575e28c61,35281,1734226422799; forceNewPlan=false, retain=false 2024-12-15T01:33:52,293 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-15T01:33:52,294 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase RegionServerObservers 2024-12-15T01:33:52,294 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-15T01:33:52,294 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-15T01:33:52,296 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-12-15T01:33:52,296 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_namespace Metrics about Tables on a single HBase RegionServer 2024-12-15T01:33:52,297 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_acl 2024-12-15T01:33:52,297 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_acl Metrics about Tables on a single HBase RegionServer 2024-12-15T01:33:52,297 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-15T01:33:52,297 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase MasterObservers 2024-12-15T01:33:52,298 DEBUG 
[HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-12-15T01:33:52,298 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver Metrics about HBase MasterObservers 2024-12-15T01:33:52,298 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-15T01:33:52,298 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase RegionObservers 2024-12-15T01:33:52,299 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-15T01:33:52,299 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-15T01:33:52,299 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-15T01:33:52,300 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-15T01:33:52,408 INFO [66d575e28c61:40789 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-15T01:33:52,411 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=13 updating hbase:meta row=4543aa4c02dba9f2c58b479a2039b7ca, regionState=OPENING, regionLocation=66d575e28c61,39381,1734226422872 2024-12-15T01:33:52,411 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=14 updating hbase:meta row=23b0667c1da493111bed50bdc7d62bf8, regionState=OPENING, regionLocation=66d575e28c61,35281,1734226422799 2024-12-15T01:33:52,416 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE; OpenRegionProcedure 23b0667c1da493111bed50bdc7d62bf8, server=66d575e28c61,35281,1734226422799}] 2024-12-15T01:33:52,422 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=16, ppid=13, state=RUNNABLE; OpenRegionProcedure 4543aa4c02dba9f2c58b479a2039b7ca, server=66d575e28c61,39381,1734226422872}] 2024-12-15T01:33:52,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-15T01:33:52,573 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 66d575e28c61,35281,1734226422799 2024-12-15T01:33:52,583 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 66d575e28c61,39381,1734226422872 2024-12-15T01:33:52,595 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] handler.AssignRegionHandler(135): Open testtb-testExportWithTargetName,1,1734226432138.23b0667c1da493111bed50bdc7d62bf8. 2024-12-15T01:33:52,595 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(7285): Opening region: {ENCODED => 23b0667c1da493111bed50bdc7d62bf8, NAME => 'testtb-testExportWithTargetName,1,1734226432138.23b0667c1da493111bed50bdc7d62bf8.', STARTKEY => '1', ENDKEY => ''} 2024-12-15T01:33:52,595 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportWithTargetName,1,1734226432138.23b0667c1da493111bed50bdc7d62bf8. service=AccessControlService 2024-12-15T01:33:52,596 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-15T01:33:52,596 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithTargetName 23b0667c1da493111bed50bdc7d62bf8 2024-12-15T01:33:52,596 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(894): Instantiated testtb-testExportWithTargetName,1,1734226432138.23b0667c1da493111bed50bdc7d62bf8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T01:33:52,596 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(7327): checking encryption for 23b0667c1da493111bed50bdc7d62bf8 2024-12-15T01:33:52,596 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(7330): checking classloading for 23b0667c1da493111bed50bdc7d62bf8 2024-12-15T01:33:52,598 INFO [StoreOpener-23b0667c1da493111bed50bdc7d62bf8-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 23b0667c1da493111bed50bdc7d62bf8 2024-12-15T01:33:52,601 INFO [StoreOpener-23b0667c1da493111bed50bdc7d62bf8-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 23b0667c1da493111bed50bdc7d62bf8 columnFamilyName cf 2024-12-15T01:33:52,601 DEBUG [StoreOpener-23b0667c1da493111bed50bdc7d62bf8-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T01:33:52,602 INFO [StoreOpener-23b0667c1da493111bed50bdc7d62bf8-1 {}] regionserver.HStore(327): Store=23b0667c1da493111bed50bdc7d62bf8/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-15T01:33:52,604 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithTargetName/23b0667c1da493111bed50bdc7d62bf8 2024-12-15T01:33:52,605 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithTargetName/23b0667c1da493111bed50bdc7d62bf8 2024-12-15T01:33:52,607 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] 
handler.AssignRegionHandler(135): Open testtb-testExportWithTargetName,,1734226432138.4543aa4c02dba9f2c58b479a2039b7ca. 2024-12-15T01:33:52,608 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(7285): Opening region: {ENCODED => 4543aa4c02dba9f2c58b479a2039b7ca, NAME => 'testtb-testExportWithTargetName,,1734226432138.4543aa4c02dba9f2c58b479a2039b7ca.', STARTKEY => '', ENDKEY => '1'} 2024-12-15T01:33:52,608 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportWithTargetName,,1734226432138.4543aa4c02dba9f2c58b479a2039b7ca. service=AccessControlService 2024-12-15T01:33:52,608 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-15T01:33:52,609 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithTargetName 4543aa4c02dba9f2c58b479a2039b7ca 2024-12-15T01:33:52,609 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(894): Instantiated testtb-testExportWithTargetName,,1734226432138.4543aa4c02dba9f2c58b479a2039b7ca.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T01:33:52,609 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(7327): checking encryption for 4543aa4c02dba9f2c58b479a2039b7ca 2024-12-15T01:33:52,609 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(7330): checking classloading for 4543aa4c02dba9f2c58b479a2039b7ca 2024-12-15T01:33:52,609 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(1085): writing seq id for 23b0667c1da493111bed50bdc7d62bf8 2024-12-15T01:33:52,611 INFO [StoreOpener-4543aa4c02dba9f2c58b479a2039b7ca-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 4543aa4c02dba9f2c58b479a2039b7ca 2024-12-15T01:33:52,612 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithTargetName/23b0667c1da493111bed50bdc7d62bf8/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-15T01:33:52,613 INFO [StoreOpener-4543aa4c02dba9f2c58b479a2039b7ca-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output 
for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4543aa4c02dba9f2c58b479a2039b7ca columnFamilyName cf 2024-12-15T01:33:52,613 DEBUG [StoreOpener-4543aa4c02dba9f2c58b479a2039b7ca-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T01:33:52,613 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(1102): Opened 23b0667c1da493111bed50bdc7d62bf8; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60223735, jitterRate=-0.10259641706943512}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-15T01:33:52,614 INFO [StoreOpener-4543aa4c02dba9f2c58b479a2039b7ca-1 {}] regionserver.HStore(327): Store=4543aa4c02dba9f2c58b479a2039b7ca/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-15T01:33:52,614 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(1001): Region open journal for 23b0667c1da493111bed50bdc7d62bf8: 2024-12-15T01:33:52,616 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithTargetName/4543aa4c02dba9f2c58b479a2039b7ca 2024-12-15T01:33:52,616 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithTargetName/4543aa4c02dba9f2c58b479a2039b7ca 2024-12-15T01:33:52,620 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportWithTargetName,1,1734226432138.23b0667c1da493111bed50bdc7d62bf8., pid=15, masterSystemTime=1734226432573 2024-12-15T01:33:52,624 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportWithTargetName,1,1734226432138.23b0667c1da493111bed50bdc7d62bf8. 2024-12-15T01:33:52,624 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] handler.AssignRegionHandler(164): Opened testtb-testExportWithTargetName,1,1734226432138.23b0667c1da493111bed50bdc7d62bf8. 
2024-12-15T01:33:52,624 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(1085): writing seq id for 4543aa4c02dba9f2c58b479a2039b7ca 2024-12-15T01:33:52,625 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=14 updating hbase:meta row=23b0667c1da493111bed50bdc7d62bf8, regionState=OPEN, openSeqNum=2, regionLocation=66d575e28c61,35281,1734226422799 2024-12-15T01:33:52,631 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithTargetName/4543aa4c02dba9f2c58b479a2039b7ca/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-15T01:33:52,632 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(1102): Opened 4543aa4c02dba9f2c58b479a2039b7ca; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60133018, jitterRate=-0.1039482057094574}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-15T01:33:52,632 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(1001): Region open journal for 4543aa4c02dba9f2c58b479a2039b7ca: 2024-12-15T01:33:52,633 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=15, resume processing ppid=14 2024-12-15T01:33:52,633 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=15, ppid=14, state=SUCCESS; OpenRegionProcedure 23b0667c1da493111bed50bdc7d62bf8, server=66d575e28c61,35281,1734226422799 in 212 msec 2024-12-15T01:33:52,635 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportWithTargetName,,1734226432138.4543aa4c02dba9f2c58b479a2039b7ca., pid=16, masterSystemTime=1734226432583 2024-12-15T01:33:52,637 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=14, ppid=12, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=23b0667c1da493111bed50bdc7d62bf8, ASSIGN in 379 msec 2024-12-15T01:33:52,638 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportWithTargetName,,1734226432138.4543aa4c02dba9f2c58b479a2039b7ca. 2024-12-15T01:33:52,638 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] handler.AssignRegionHandler(164): Opened testtb-testExportWithTargetName,,1734226432138.4543aa4c02dba9f2c58b479a2039b7ca. 
2024-12-15T01:33:52,639 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=13 updating hbase:meta row=4543aa4c02dba9f2c58b479a2039b7ca, regionState=OPEN, openSeqNum=2, regionLocation=66d575e28c61,39381,1734226422872 2024-12-15T01:33:52,646 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=16, resume processing ppid=13 2024-12-15T01:33:52,648 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=16, ppid=13, state=SUCCESS; OpenRegionProcedure 4543aa4c02dba9f2c58b479a2039b7ca, server=66d575e28c61,39381,1734226422872 in 220 msec 2024-12-15T01:33:52,650 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=13, resume processing ppid=12 2024-12-15T01:33:52,650 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=4543aa4c02dba9f2c58b479a2039b7ca, ASSIGN in 392 msec 2024-12-15T01:33:52,652 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=12, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-15T01:33:52,652 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734226432652"}]},"ts":"1734226432652"} 2024-12-15T01:33:52,654 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithTargetName, state=ENABLED in hbase:meta 2024-12-15T01:33:52,670 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=12, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_POST_OPERATION 2024-12-15T01:33:52,673 DEBUG [PEWorker-1 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportWithTargetName jenkins: RWXCA 2024-12-15T01:33:52,677 DEBUG [PEWorker-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T01:33:52,679 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54548, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T01:33:52,683 DEBUG [hconnection-0x325899b3-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T01:33:52,705 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35258, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=ClientService 2024-12-15T01:33:52,713 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35281 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-12-15T01:33:52,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-15T01:33:52,752 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42663-0x1002752410c0001, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-15T01:33:52,752 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39381-0x1002752410c0003, quorum=127.0.0.1:64248, baseZNode=/hbase 
Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-15T01:33:52,752 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-15T01:33:52,752 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-15T01:33:52,752 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42663-0x1002752410c0001, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-15T01:33:52,752 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39381-0x1002752410c0003, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-15T01:33:52,752 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35281-0x1002752410c0002, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-15T01:33:52,752 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35281-0x1002752410c0002, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-15T01:33:52,795 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-15T01:33:52,795 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-15T01:33:52,795 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-15T01:33:52,795 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-15T01:33:52,798 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; CreateTableProcedure table=testtb-testExportWithTargetName in 656 msec 2024-12-15T01:33:53,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-15T01:33:53,253 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportWithTargetName, procId: 12 completed 2024-12-15T01:33:53,254 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testExportWithTargetName get assigned. 
Timeout = 60000ms 2024-12-15T01:33:53,255 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-15T01:33:53,263 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testExportWithTargetName assigned to meta. Checking AM states. 2024-12-15T01:33:53,264 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-15T01:33:53,264 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testExportWithTargetName assigned. 2024-12-15T01:33:53,277 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-15T01:33:53,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734226433277 (current time:1734226433277). 2024-12-15T01:33:53,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-15T01:33:53,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testExportWithTargetName VERSION not specified, setting to 2 2024-12-15T01:33:53,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-15T01:33:53,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x71dcb237 to 127.0.0.1:64248 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1020f0a3 2024-12-15T01:33:53,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@17357cc4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T01:33:53,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T01:33:53,290 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35264, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T01:33:53,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x71dcb237 to 127.0.0.1:64248 2024-12-15T01:33:53,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T01:33:53,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4067a9c9 to 127.0.0.1:64248 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5ba96490 2024-12-15T01:33:53,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@165021, 
compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T01:33:53,320 DEBUG [hconnection-0x574cb442-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T01:33:53,321 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35278, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T01:33:53,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T01:33:53,329 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54562, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T01:33:53,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4067a9c9 to 127.0.0.1:64248 2024-12-15T01:33:53,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T01:33:53,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-12-15T01:33:53,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-15T01:33:53,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] procedure2.ProcedureExecutor(1098): Stored pid=17, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-15T01:33:53,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 17 2024-12-15T01:33:53,357 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-15T01:33:53,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=17 2024-12-15T01:33:53,363 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-15T01:33:53,376 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-15T01:33:53,393 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741847_1023 (size=167) 2024-12-15T01:33:53,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741847_1023 (size=167) 2024-12-15T01:33:53,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741847_1023 (size=167) 2024-12-15T01:33:53,395 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-15T01:33:53,398 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=18, ppid=17, state=RUNNABLE; SnapshotRegionProcedure 4543aa4c02dba9f2c58b479a2039b7ca}, {pid=19, ppid=17, state=RUNNABLE; SnapshotRegionProcedure 23b0667c1da493111bed50bdc7d62bf8}] 2024-12-15T01:33:53,403 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=18, ppid=17, state=RUNNABLE; SnapshotRegionProcedure 4543aa4c02dba9f2c58b479a2039b7ca 2024-12-15T01:33:53,403 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=19, ppid=17, state=RUNNABLE; SnapshotRegionProcedure 23b0667c1da493111bed50bdc7d62bf8 2024-12-15T01:33:53,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=17 2024-12-15T01:33:53,559 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 66d575e28c61,39381,1734226422872 2024-12-15T01:33:53,559 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 66d575e28c61,35281,1734226422799 2024-12-15T01:33:53,560 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39381 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=18 2024-12-15T01:33:53,560 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35281 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=19 2024-12-15T01:33:53,561 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,,1734226432138.4543aa4c02dba9f2c58b479a2039b7ca. 2024-12-15T01:33:53,561 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,1,1734226432138.23b0667c1da493111bed50bdc7d62bf8. 
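
The SnapshotProcedure pid=17 and its per-region SnapshotRegionCallable dispatches above are started by a single Admin call. A minimal sketch, reusing the snapshot and table names from the log (everything else is illustrative):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class TakeEmptySnapshot {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Blocks until the master-side SnapshotProcedure completes.
          admin.snapshot("emptySnaptb0-testExportWithTargetName",
              TableName.valueOf("testtb-testExportWithTargetName"));
        }
      }
    }
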
2024-12-15T01:33:53,563 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] regionserver.HRegion(2538): Flush status journal for 23b0667c1da493111bed50bdc7d62bf8: 2024-12-15T01:33:53,563 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] regionserver.HRegion(2538): Flush status journal for 4543aa4c02dba9f2c58b479a2039b7ca: 2024-12-15T01:33:53,563 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,1,1734226432138.23b0667c1da493111bed50bdc7d62bf8. for emptySnaptb0-testExportWithTargetName completed. 2024-12-15T01:33:53,563 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,,1734226432138.4543aa4c02dba9f2c58b479a2039b7ca. for emptySnaptb0-testExportWithTargetName completed. 2024-12-15T01:33:53,564 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,1,1734226432138.23b0667c1da493111bed50bdc7d62bf8.' region-info for snapshot=emptySnaptb0-testExportWithTargetName 2024-12-15T01:33:53,564 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,,1734226432138.4543aa4c02dba9f2c58b479a2039b7ca.' region-info for snapshot=emptySnaptb0-testExportWithTargetName 2024-12-15T01:33:53,567 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-15T01:33:53,567 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-15T01:33:53,569 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-15T01:33:53,569 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-15T01:33:53,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741849_1025 (size=70) 2024-12-15T01:33:53,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741849_1025 (size=70) 2024-12-15T01:33:53,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741849_1025 (size=70) 2024-12-15T01:33:53,589 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,1,1734226432138.23b0667c1da493111bed50bdc7d62bf8. 
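
Both manifests above add references for an empty hfile list because no data has been written yet, which is what makes emptySnaptb0-testExportWithTargetName an "empty" snapshot. A hypothetical client-side check of that state (not something the log itself shows being run) is a plain scan and row count:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;

    public class CountRows {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("testtb-testExportWithTargetName"));
             ResultScanner scanner = table.getScanner(new Scan())) {
          long rows = 0;
          for (Result ignored : scanner) {
            rows++;
          }
          // At this point in the log, the count would still be zero.
          System.out.println("rows=" + rows);
        }
      }
    }
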
2024-12-15T01:33:53,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741848_1024 (size=70) 2024-12-15T01:33:53,591 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=19 2024-12-15T01:33:53,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.HMaster(4106): Remote procedure done, pid=19 2024-12-15T01:33:53,594 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithTargetName on region 23b0667c1da493111bed50bdc7d62bf8 2024-12-15T01:33:53,594 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=19, ppid=17, state=RUNNABLE; SnapshotRegionProcedure 23b0667c1da493111bed50bdc7d62bf8 2024-12-15T01:33:53,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741848_1024 (size=70) 2024-12-15T01:33:53,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741848_1024 (size=70) 2024-12-15T01:33:53,599 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=19, ppid=17, state=SUCCESS; SnapshotRegionProcedure 23b0667c1da493111bed50bdc7d62bf8 in 199 msec 2024-12-15T01:33:53,603 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,,1734226432138.4543aa4c02dba9f2c58b479a2039b7ca. 2024-12-15T01:33:53,603 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=18 2024-12-15T01:33:53,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.HMaster(4106): Remote procedure done, pid=18 2024-12-15T01:33:53,604 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithTargetName on region 4543aa4c02dba9f2c58b479a2039b7ca 2024-12-15T01:33:53,605 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=18, ppid=17, state=RUNNABLE; SnapshotRegionProcedure 4543aa4c02dba9f2c58b479a2039b7ca 2024-12-15T01:33:53,610 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=18, resume processing ppid=17 2024-12-15T01:33:53,610 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-15T01:33:53,610 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=18, ppid=17, state=SUCCESS; SnapshotRegionProcedure 4543aa4c02dba9f2c58b479a2039b7ca in 208 msec 2024-12-15T01:33:53,612 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName 
type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-15T01:33:53,615 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-15T01:33:53,615 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithTargetName 2024-12-15T01:33:53,618 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithTargetName 2024-12-15T01:33:53,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741850_1026 (size=549) 2024-12-15T01:33:53,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741850_1026 (size=549) 2024-12-15T01:33:53,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741850_1026 (size=549) 2024-12-15T01:33:53,650 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-15T01:33:53,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=17 2024-12-15T01:33:53,663 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-15T01:33:53,664 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithTargetName to hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/emptySnaptb0-testExportWithTargetName 2024-12-15T01:33:53,667 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-15T01:33:53,667 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 17 2024-12-15T01:33:53,670 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=17, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } in 320 msec 
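
After SNAPSHOT_COMPLETE_SNAPSHOT moves the directory out of .hbase-snapshot/.tmp, the snapshot is visible to clients. One way the finished snapshot could be listed, assuming the Pattern-based Admin overload:

    import java.util.regex.Pattern;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotDescription;

    public class ListSnapshots {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Matches emptySnaptb0-testExportWithTargetName (and later snaptb0-...).
          for (SnapshotDescription sd :
              admin.listSnapshots(Pattern.compile(".*testExportWithTargetName"))) {
            System.out.println(sd.getName());
          }
        }
      }
    }
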
2024-12-15T01:33:53,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=17 2024-12-15T01:33:53,964 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithTargetName, procId: 17 completed 2024-12-15T01:33:53,984 DEBUG [htable-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T01:33:53,985 DEBUG [htable-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T01:33:53,987 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41030, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T01:33:53,987 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54568, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T01:33:53,988 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] regionserver.HRegion(8254): writing data to region testtb-testExportWithTargetName,,1734226432138.4543aa4c02dba9f2c58b479a2039b7ca. with WAL disabled. Data may be lost in the event of a crash. 2024-12-15T01:33:53,990 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35281 {}] regionserver.HRegion(8254): writing data to region testtb-testExportWithTargetName,1,1734226432138.23b0667c1da493111bed50bdc7d62bf8. with WAL disabled. Data may be lost in the event of a crash. 2024-12-15T01:33:54,011 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportWithTargetName 2024-12-15T01:33:54,012 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportWithTargetName,,1734226432138.4543aa4c02dba9f2c58b479a2039b7ca. 2024-12-15T01:33:54,013 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-15T01:33:54,070 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-15T01:33:54,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734226434071 (current time:1734226434071). 
2024-12-15T01:33:54,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-15T01:33:54,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportWithTargetName VERSION not specified, setting to 2 2024-12-15T01:33:54,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-15T01:33:54,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2e0d07b2 to 127.0.0.1:64248 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@200002c9 2024-12-15T01:33:54,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4dc48001, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T01:33:54,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T01:33:54,092 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35290, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T01:33:54,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2e0d07b2 to 127.0.0.1:64248 2024-12-15T01:33:54,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T01:33:54,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1b916edc to 127.0.0.1:64248 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3b27312d 2024-12-15T01:33:54,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@528a6ac0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T01:33:54,123 DEBUG [hconnection-0x311c6f3e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T01:33:54,124 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35306, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T01:33:54,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T01:33:54,129 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54578, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T01:33:54,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper 
connection 0x1b916edc to 127.0.0.1:64248 2024-12-15T01:33:54,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T01:33:54,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-12-15T01:33:54,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-15T01:33:54,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] procedure2.ProcedureExecutor(1098): Stored pid=20, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-15T01:33:54,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 20 2024-12-15T01:33:54,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-15T01:33:54,135 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-15T01:33:54,137 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-15T01:33:54,142 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-15T01:33:54,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741851_1027 (size=162) 2024-12-15T01:33:54,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741851_1027 (size=162) 2024-12-15T01:33:54,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741851_1027 (size=162) 2024-12-15T01:33:54,186 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-15T01:33:54,186 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=21, ppid=20, state=RUNNABLE; SnapshotRegionProcedure 4543aa4c02dba9f2c58b479a2039b7ca}, {pid=22, ppid=20, state=RUNNABLE; SnapshotRegionProcedure 23b0667c1da493111bed50bdc7d62bf8}] 
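
Snapshot pid=20 is registered with type=FLUSH, so online regions are flushed before their store files are referenced. A sketch of the equivalent typed Admin call, assuming the client-side SnapshotDescription/SnapshotType classes:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotDescription;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class TakeFlushSnapshot {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("testtb-testExportWithTargetName");
          // A FLUSH-type snapshot flushes each online region's memstore first,
          // which is the HRegion "Flushing ..." activity visible below in the log.
          admin.snapshot(new SnapshotDescription(
              "snaptb0-testExportWithTargetName", tn, SnapshotType.FLUSH));
        }
      }
    }
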
2024-12-15T01:33:54,188 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=22, ppid=20, state=RUNNABLE; SnapshotRegionProcedure 23b0667c1da493111bed50bdc7d62bf8 2024-12-15T01:33:54,189 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=21, ppid=20, state=RUNNABLE; SnapshotRegionProcedure 4543aa4c02dba9f2c58b479a2039b7ca 2024-12-15T01:33:54,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-15T01:33:54,342 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 66d575e28c61,39381,1734226422872 2024-12-15T01:33:54,342 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 66d575e28c61,35281,1734226422799 2024-12-15T01:33:54,343 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39381 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=21 2024-12-15T01:33:54,344 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35281 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=22 2024-12-15T01:33:54,347 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,,1734226432138.4543aa4c02dba9f2c58b479a2039b7ca. 2024-12-15T01:33:54,347 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,1,1734226432138.23b0667c1da493111bed50bdc7d62bf8. 
2024-12-15T01:33:54,348 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.HRegion(2837): Flushing 4543aa4c02dba9f2c58b479a2039b7ca 1/1 column families, dataSize=266 B heapSize=832 B 2024-12-15T01:33:54,348 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.HRegion(2837): Flushing 23b0667c1da493111bed50bdc7d62bf8 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-12-15T01:33:54,424 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithTargetName/4543aa4c02dba9f2c58b479a2039b7ca/.tmp/cf/00e559b396c34be0915cdbf94366f504 is 71, key is 0738f4bb548135ad41ae4cb180bcf46f/cf:q/1734226433988/Put/seqid=0 2024-12-15T01:33:54,424 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithTargetName/23b0667c1da493111bed50bdc7d62bf8/.tmp/cf/a8fd5a13b83a4553af94b39787ea0c70 is 71, key is 11aba0b9dae1619a48cc81f3b46c79a6/cf:q/1734226433990/Put/seqid=0 2024-12-15T01:33:54,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-15T01:33:54,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741853_1029 (size=8256) 2024-12-15T01:33:54,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741853_1029 (size=8256) 2024-12-15T01:33:54,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741852_1028 (size=5356) 2024-12-15T01:33:54,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741852_1028 (size=5356) 2024-12-15T01:33:54,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741853_1029 (size=8256) 2024-12-15T01:33:54,445 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.00 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithTargetName/23b0667c1da493111bed50bdc7d62bf8/.tmp/cf/a8fd5a13b83a4553af94b39787ea0c70 2024-12-15T01:33:54,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741852_1028 (size=5356) 2024-12-15T01:33:54,452 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=266 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithTargetName/4543aa4c02dba9f2c58b479a2039b7ca/.tmp/cf/00e559b396c34be0915cdbf94366f504 2024-12-15T01:33:54,517 DEBUG 
[RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithTargetName/4543aa4c02dba9f2c58b479a2039b7ca/.tmp/cf/00e559b396c34be0915cdbf94366f504 as hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithTargetName/4543aa4c02dba9f2c58b479a2039b7ca/cf/00e559b396c34be0915cdbf94366f504 2024-12-15T01:33:54,517 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithTargetName/23b0667c1da493111bed50bdc7d62bf8/.tmp/cf/a8fd5a13b83a4553af94b39787ea0c70 as hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithTargetName/23b0667c1da493111bed50bdc7d62bf8/cf/a8fd5a13b83a4553af94b39787ea0c70 2024-12-15T01:33:54,529 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithTargetName/4543aa4c02dba9f2c58b479a2039b7ca/cf/00e559b396c34be0915cdbf94366f504, entries=4, sequenceid=6, filesize=5.2 K 2024-12-15T01:33:54,529 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithTargetName/23b0667c1da493111bed50bdc7d62bf8/cf/a8fd5a13b83a4553af94b39787ea0c70, entries=46, sequenceid=6, filesize=8.1 K 2024-12-15T01:33:54,533 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.HRegion(3040): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for 4543aa4c02dba9f2c58b479a2039b7ca in 185ms, sequenceid=6, compaction requested=false 2024-12-15T01:33:54,533 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.HRegion(3040): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for 23b0667c1da493111bed50bdc7d62bf8 in 185ms, sequenceid=6, compaction requested=false 2024-12-15T01:33:54,533 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithTargetName' 2024-12-15T01:33:54,533 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithTargetName' 2024-12-15T01:33:54,534 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.HRegion(2538): Flush status journal for 4543aa4c02dba9f2c58b479a2039b7ca: 2024-12-15T01:33:54,534 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.SnapshotRegionCallable(75): Snapshotting region 
testtb-testExportWithTargetName,,1734226432138.4543aa4c02dba9f2c58b479a2039b7ca. for snaptb0-testExportWithTargetName completed. 2024-12-15T01:33:54,534 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.HRegion(2538): Flush status journal for 23b0667c1da493111bed50bdc7d62bf8: 2024-12-15T01:33:54,534 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,1,1734226432138.23b0667c1da493111bed50bdc7d62bf8. for snaptb0-testExportWithTargetName completed. 2024-12-15T01:33:54,534 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,,1734226432138.4543aa4c02dba9f2c58b479a2039b7ca.' region-info for snapshot=snaptb0-testExportWithTargetName 2024-12-15T01:33:54,534 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,1,1734226432138.23b0667c1da493111bed50bdc7d62bf8.' region-info for snapshot=snaptb0-testExportWithTargetName 2024-12-15T01:33:54,534 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-15T01:33:54,534 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-15T01:33:54,535 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithTargetName/23b0667c1da493111bed50bdc7d62bf8/cf/a8fd5a13b83a4553af94b39787ea0c70] hfiles 2024-12-15T01:33:54,535 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithTargetName/4543aa4c02dba9f2c58b479a2039b7ca/cf/00e559b396c34be0915cdbf94366f504] hfiles 2024-12-15T01:33:54,535 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithTargetName/4543aa4c02dba9f2c58b479a2039b7ca/cf/00e559b396c34be0915cdbf94366f504 for snapshot=snaptb0-testExportWithTargetName 2024-12-15T01:33:54,535 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithTargetName/23b0667c1da493111bed50bdc7d62bf8/cf/a8fd5a13b83a4553af94b39787ea0c70 for snapshot=snaptb0-testExportWithTargetName 2024-12-15T01:33:54,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741855_1031 (size=109) 2024-12-15T01:33:54,553 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741855_1031 (size=109) 2024-12-15T01:33:54,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741855_1031 (size=109) 2024-12-15T01:33:54,556 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,,1734226432138.4543aa4c02dba9f2c58b479a2039b7ca. 2024-12-15T01:33:54,556 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=21 2024-12-15T01:33:54,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.HMaster(4106): Remote procedure done, pid=21 2024-12-15T01:33:54,557 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithTargetName on region 4543aa4c02dba9f2c58b479a2039b7ca 2024-12-15T01:33:54,557 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=21, ppid=20, state=RUNNABLE; SnapshotRegionProcedure 4543aa4c02dba9f2c58b479a2039b7ca 2024-12-15T01:33:54,561 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=21, ppid=20, state=SUCCESS; SnapshotRegionProcedure 4543aa4c02dba9f2c58b479a2039b7ca in 372 msec 2024-12-15T01:33:54,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741854_1030 (size=109) 2024-12-15T01:33:54,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741854_1030 (size=109) 2024-12-15T01:33:54,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741854_1030 (size=109) 2024-12-15T01:33:54,570 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,1,1734226432138.23b0667c1da493111bed50bdc7d62bf8. 
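
The flushes logged just above (~266 B and ~3.00 KB of memstore data) were issued by the FLUSH-type snapshot itself; the resulting hfiles are then referenced in the manifest rather than copied. Issued by hand, the equivalent of that flush step would simply be:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTable {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Flushes every region of the table; the snapshot procedure performs the
          // per-region equivalent before adding hfile references.
          admin.flush(TableName.valueOf("testtb-testExportWithTargetName"));
        }
      }
    }
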
2024-12-15T01:33:54,570 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=22 2024-12-15T01:33:54,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.HMaster(4106): Remote procedure done, pid=22 2024-12-15T01:33:54,570 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithTargetName on region 23b0667c1da493111bed50bdc7d62bf8 2024-12-15T01:33:54,571 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=22, ppid=20, state=RUNNABLE; SnapshotRegionProcedure 23b0667c1da493111bed50bdc7d62bf8 2024-12-15T01:33:54,575 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=22, resume processing ppid=20 2024-12-15T01:33:54,575 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-15T01:33:54,575 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=22, ppid=20, state=SUCCESS; SnapshotRegionProcedure 23b0667c1da493111bed50bdc7d62bf8 in 386 msec 2024-12-15T01:33:54,577 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-15T01:33:54,578 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-15T01:33:54,578 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithTargetName 2024-12-15T01:33:54,580 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/.tmp/snaptb0-testExportWithTargetName 2024-12-15T01:33:54,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741856_1032 (size=627) 2024-12-15T01:33:54,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741856_1032 (size=627) 2024-12-15T01:33:54,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741856_1032 (size=627) 2024-12-15T01:33:54,606 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-15T01:33:54,617 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=20, 
state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-15T01:33:54,618 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/.tmp/snaptb0-testExportWithTargetName to hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/snaptb0-testExportWithTargetName 2024-12-15T01:33:54,620 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-15T01:33:54,620 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 20 2024-12-15T01:33:54,623 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } in 488 msec 2024-12-15T01:33:54,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-15T01:33:54,743 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithTargetName, procId: 20 completed 2024-12-15T01:33:54,743 INFO [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226434743 2024-12-15T01:33:54,743 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:33329, tgtDir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226434743, rawTgtDir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226434743, srcFsUri=hdfs://localhost:33329, srcDir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b 2024-12-15T01:33:54,793 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:33329, inputRoot=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b 2024-12-15T01:33:54,794 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-778768832_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226434743, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226434743/.hbase-snapshot/.tmp/testExportWithTargetName 2024-12-15T01:33:54,800 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 
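
From here the log is the ExportSnapshot tool: it verifies the source snapshot, copies the manifest into the target's .hbase-snapshot/.tmp/testExportWithTargetName staging directory, and then prepares a MapReduce job whose dependency jars are resolved by TableMapReduceUtil in the lines that follow. A sketch of driving the same export programmatically, assuming ExportSnapshot runs as a Hadoop Tool and accepts the -snapshot/-target/-copy-to options:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class RunExport {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Export snaptb0-... under a different target name; the destination and
        // export directory below are the ones printed by the test.
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testExportWithTargetName",
            "-target", "testExportWithTargetName",
            "-copy-to", "hdfs://localhost:33329/user/jenkins/test-data/"
                + "94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226434743"
        });
        System.exit(rc);
      }
    }

The -target option is what renames snaptb0-testExportWithTargetName to testExportWithTargetName on the destination, which matches the .tmp/testExportWithTargetName staging path in the log.
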
2024-12-15T01:33:54,821 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/snaptb0-testExportWithTargetName to hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226434743/.hbase-snapshot/.tmp/testExportWithTargetName 2024-12-15T01:33:54,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741857_1033 (size=627) 2024-12-15T01:33:54,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741857_1033 (size=627) 2024-12-15T01:33:54,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741857_1033 (size=627) 2024-12-15T01:33:54,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741858_1034 (size=162) 2024-12-15T01:33:54,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741858_1034 (size=162) 2024-12-15T01:33:54,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741858_1034 (size=162) 2024-12-15T01:33:54,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741859_1035 (size=154) 2024-12-15T01:33:54,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741859_1035 (size=154) 2024-12-15T01:33:54,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741859_1035 (size=154) 2024-12-15T01:33:55,547 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/hadoop-3037354407658986550.jar 2024-12-15T01:33:55,548 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-15T01:33:55,548 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-15T01:33:55,548 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-15T01:33:56,417 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/hadoop-1298831196358944207.jar 2024-12-15T01:33:56,417 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class 
org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-15T01:33:56,417 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-15T01:33:56,480 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/hadoop-12377687879654356731.jar 2024-12-15T01:33:56,481 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-15T01:33:56,481 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-15T01:33:56,481 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-15T01:33:56,482 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-15T01:33:56,482 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-15T01:33:56,482 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-15T01:33:56,483 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-15T01:33:56,483 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-15T01:33:56,483 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-15T01:33:56,484 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-15T01:33:56,484 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-15T01:33:56,484 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-15T01:33:56,484 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-15T01:33:56,485 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-15T01:33:56,485 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-15T01:33:56,485 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-15T01:33:56,486 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-15T01:33:56,486 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-15T01:33:56,488 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-15T01:33:56,488 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-15T01:33:56,489 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-15T01:33:56,489 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-15T01:33:56,489 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-15T01:33:56,489 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-15T01:33:56,490 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-15T01:33:56,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741860_1036 (size=127628) 2024-12-15T01:33:56,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741860_1036 (size=127628) 2024-12-15T01:33:56,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741860_1036 (size=127628) 2024-12-15T01:33:56,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741861_1037 (size=912094) 2024-12-15T01:33:56,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741861_1037 (size=912094) 2024-12-15T01:33:56,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741861_1037 (size=912094) 2024-12-15T01:33:56,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741862_1038 (size=2172137) 2024-12-15T01:33:56,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741862_1038 (size=2172137) 2024-12-15T01:33:56,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741862_1038 (size=2172137) 2024-12-15T01:33:56,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741863_1039 (size=213228) 2024-12-15T01:33:56,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741863_1039 (size=213228) 2024-12-15T01:33:56,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741863_1039 (size=213228) 
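The long run of TableMapReduceUtil(923) entries above records each dependency class being resolved to the jar that will be attached to the MapReduce job's classpath. The sketch below only illustrates the idea of mapping a class to its containing jar using the standard Java CodeSource API; it is not HBase's internal implementation.

    import java.security.CodeSource;

    public final class JarLocator {
      private JarLocator() {}

      // Best effort: return the jar (or classes directory) a class was loaded from.
      public static String containingJar(Class<?> clazz) {
        CodeSource src = clazz.getProtectionDomain().getCodeSource();
        return src == null ? null : src.getLocation().toString();
      }

      public static void main(String[] args) {
        // Mirrors the "For class X, using jar Y" lines above.
        Class<?> c = org.apache.hadoop.io.Text.class;
        System.out.println("For class " + c.getName() + ", using jar " + containingJar(c));
      }
    }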
2024-12-15T01:33:56,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741864_1040 (size=1877034) 2024-12-15T01:33:56,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741864_1040 (size=1877034) 2024-12-15T01:33:56,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741864_1040 (size=1877034) 2024-12-15T01:33:56,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741865_1041 (size=533455) 2024-12-15T01:33:56,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741865_1041 (size=533455) 2024-12-15T01:33:56,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741865_1041 (size=533455) 2024-12-15T01:33:56,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741866_1042 (size=7280644) 2024-12-15T01:33:56,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741866_1042 (size=7280644) 2024-12-15T01:33:56,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741866_1042 (size=7280644) 2024-12-15T01:33:56,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741867_1043 (size=4188619) 2024-12-15T01:33:56,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741867_1043 (size=4188619) 2024-12-15T01:33:56,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741867_1043 (size=4188619) 2024-12-15T01:33:56,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741868_1044 (size=20406) 2024-12-15T01:33:56,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741868_1044 (size=20406) 2024-12-15T01:33:56,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741868_1044 (size=20406) 2024-12-15T01:33:56,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741869_1045 (size=75495) 2024-12-15T01:33:56,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741869_1045 (size=75495) 2024-12-15T01:33:56,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741869_1045 (size=75495) 2024-12-15T01:33:56,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741870_1046 (size=45609) 2024-12-15T01:33:56,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741870_1046 
(size=45609) 2024-12-15T01:33:56,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741870_1046 (size=45609) 2024-12-15T01:33:56,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741871_1047 (size=110084) 2024-12-15T01:33:56,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741871_1047 (size=110084) 2024-12-15T01:33:56,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741871_1047 (size=110084) 2024-12-15T01:33:57,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741872_1048 (size=6350909) 2024-12-15T01:33:57,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741872_1048 (size=6350909) 2024-12-15T01:33:57,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741872_1048 (size=6350909) 2024-12-15T01:33:57,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741873_1049 (size=1323991) 2024-12-15T01:33:57,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741873_1049 (size=1323991) 2024-12-15T01:33:57,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741873_1049 (size=1323991) 2024-12-15T01:33:57,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741874_1050 (size=23076) 2024-12-15T01:33:57,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741874_1050 (size=23076) 2024-12-15T01:33:57,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741874_1050 (size=23076) 2024-12-15T01:33:57,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741875_1051 (size=126803) 2024-12-15T01:33:57,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741875_1051 (size=126803) 2024-12-15T01:33:57,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741875_1051 (size=126803) 2024-12-15T01:33:57,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741876_1052 (size=322274) 2024-12-15T01:33:57,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741876_1052 (size=322274) 2024-12-15T01:33:57,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741876_1052 (size=322274) 2024-12-15T01:33:57,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to 
blk_1073741877_1053 (size=451756) 2024-12-15T01:33:57,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741877_1053 (size=451756) 2024-12-15T01:33:57,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741877_1053 (size=451756) 2024-12-15T01:33:57,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741878_1054 (size=1832290) 2024-12-15T01:33:57,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741878_1054 (size=1832290) 2024-12-15T01:33:57,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741878_1054 (size=1832290) 2024-12-15T01:33:57,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741879_1055 (size=30081) 2024-12-15T01:33:57,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741879_1055 (size=30081) 2024-12-15T01:33:57,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741879_1055 (size=30081) 2024-12-15T01:33:57,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741880_1056 (size=53616) 2024-12-15T01:33:57,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741880_1056 (size=53616) 2024-12-15T01:33:57,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741880_1056 (size=53616) 2024-12-15T01:33:57,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741881_1057 (size=29229) 2024-12-15T01:33:57,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741881_1057 (size=29229) 2024-12-15T01:33:57,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741881_1057 (size=29229) 2024-12-15T01:33:57,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741882_1058 (size=169089) 2024-12-15T01:33:57,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741882_1058 (size=169089) 2024-12-15T01:33:57,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741882_1058 (size=169089) 2024-12-15T01:33:57,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741883_1059 (size=5175431) 2024-12-15T01:33:57,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741883_1059 (size=5175431) 2024-12-15T01:33:57,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added 
to blk_1073741883_1059 (size=5175431) 2024-12-15T01:33:57,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741884_1060 (size=136454) 2024-12-15T01:33:57,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741884_1060 (size=136454) 2024-12-15T01:33:57,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741884_1060 (size=136454) 2024-12-15T01:33:57,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741885_1061 (size=3317408) 2024-12-15T01:33:57,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741885_1061 (size=3317408) 2024-12-15T01:33:57,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741885_1061 (size=3317408) 2024-12-15T01:33:57,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741886_1062 (size=503880) 2024-12-15T01:33:57,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741886_1062 (size=503880) 2024-12-15T01:33:57,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741886_1062 (size=503880) 2024-12-15T01:33:57,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741887_1063 (size=4695811) 2024-12-15T01:33:57,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741887_1063 (size=4695811) 2024-12-15T01:33:57,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741887_1063 (size=4695811) 2024-12-15T01:33:57,615 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
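The JobResourceUploader warning just above ("No job jar file set. User classes may not be found.") is the standard MapReduce hint that the driver never set a job jar; it is generally harmless here because the dependencies were attached separately, as the earlier resolution entries suggest. For reference, a minimal sketch of how a normal driver avoids the warning (the driver class and jar path below are hypothetical):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.Job;

    public class JobJarSketch {
      public static void main(String[] args) throws Exception {
        Job job = Job.getInstance(new Configuration(), "export-sketch");
        // Either call silences the "No job jar file set" warning:
        job.setJarByClass(JobJarSketch.class);   // resolve the jar from a class packaged inside it
        // job.setJar("/path/to/my-job.jar");    // or point at the jar explicitly (hypothetical path)
      }
    }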
2024-12-15T01:33:57,622 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testExportWithTargetName' hfile list 2024-12-15T01:33:57,629 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=13.3 K 2024-12-15T01:33:57,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741888_1064 (size=342) 2024-12-15T01:33:57,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741888_1064 (size=342) 2024-12-15T01:33:57,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741888_1064 (size=342) 2024-12-15T01:33:57,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741889_1065 (size=15) 2024-12-15T01:33:57,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741889_1065 (size=15) 2024-12-15T01:33:57,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741889_1065 (size=15) 2024-12-15T01:33:57,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741890_1066 (size=304936) 2024-12-15T01:33:57,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741890_1066 (size=304936) 2024-12-15T01:33:57,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741890_1066 (size=304936) 2024-12-15T01:33:58,099 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-15T01:33:58,407 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-15T01:33:58,407 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-15T01:33:58,801 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734226430009_0001_000001 (auth:SIMPLE) from 127.0.0.1:52698 2024-12-15T01:34:02,293 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName 2024-12-15T01:34:02,293 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName Metrics about Tables on a single HBase RegionServer 2024-12-15T01:34:06,036 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734226430009_0001_000001 (auth:SIMPLE) from 127.0.0.1:49860 2024-12-15T01:34:06,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741891_1067 (size=350610) 2024-12-15T01:34:06,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741891_1067 (size=350610) 2024-12-15T01:34:06,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741891_1067 (size=350610) 2024-12-15T01:34:08,400 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734226430009_0001_000001 (auth:SIMPLE) from 127.0.0.1:34864 2024-12-15T01:34:10,817 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
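The two AbstractLeafQueue warnings above come from the MiniMRCluster's capacity scheduler concluding that maximum-am-resource-percent is too small to admit even one ApplicationMaster; it then skips enforcement, so the export job still runs. If the warning ever needed to be addressed, the relevant knob is the standard YARN property shown in this hedged sketch (the value 0.5 and setting it programmatically are illustrative assumptions):

    import org.apache.hadoop.conf.Configuration;

    public class AmResourceSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Let ApplicationMasters use up to 50% of queue resources (illustrative value; default is 0.1).
        conf.setFloat("yarn.scheduler.capacity.maximum-am-resource-percent", 0.5f);
        System.out.println(conf.get("yarn.scheduler.capacity.maximum-am-resource-percent"));
      }
    }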
2024-12-15T01:34:11,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741892_1068 (size=8256) 2024-12-15T01:34:11,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741892_1068 (size=8256) 2024-12-15T01:34:11,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741892_1068 (size=8256) 2024-12-15T01:34:11,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741893_1069 (size=5356) 2024-12-15T01:34:11,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741893_1069 (size=5356) 2024-12-15T01:34:11,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741893_1069 (size=5356) 2024-12-15T01:34:11,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741894_1070 (size=17419) 2024-12-15T01:34:11,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741894_1070 (size=17419) 2024-12-15T01:34:11,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741894_1070 (size=17419) 2024-12-15T01:34:11,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741895_1071 (size=464) 2024-12-15T01:34:11,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741895_1071 (size=464) 2024-12-15T01:34:11,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741895_1071 (size=464) 2024-12-15T01:34:11,614 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/MiniMRCluster_1118476571/yarn-659708074/MiniMRCluster_1118476571-localDir-nm-0_1/usercache/jenkins/appcache/application_1734226430009_0001/container_1734226430009_0001_01_000002/launch_container.sh] 2024-12-15T01:34:11,614 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/MiniMRCluster_1118476571/yarn-659708074/MiniMRCluster_1118476571-localDir-nm-0_1/usercache/jenkins/appcache/application_1734226430009_0001/container_1734226430009_0001_01_000002/container_tokens] 2024-12-15T01:34:11,615 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/MiniMRCluster_1118476571/yarn-659708074/MiniMRCluster_1118476571-localDir-nm-0_1/usercache/jenkins/appcache/application_1734226430009_0001/container_1734226430009_0001_01_000002/sysfs] 2024-12-15T01:34:11,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741896_1072 (size=17419) 2024-12-15T01:34:11,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741896_1072 (size=17419) 2024-12-15T01:34:11,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741896_1072 (size=17419) 2024-12-15T01:34:11,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741897_1073 (size=350610) 2024-12-15T01:34:11,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741897_1073 (size=350610) 2024-12-15T01:34:11,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741897_1073 (size=350610) 2024-12-15T01:34:11,691 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734226430009_0001_000001 (auth:SIMPLE) from 127.0.0.1:43046 2024-12-15T01:34:13,749 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-15T01:34:13,750 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 
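At this point the MapReduce copy has finished and ExportSnapshot finalizes and verifies the export; the listing that follows shows the two files the verification relies on, .snapshotinfo and data.manifest, present at both the source and the exported location. A rough way to spot-check that layout with the plain Hadoop FileSystem API (paths are placeholders, and this is not the test's verification code):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ExportLayoutCheck {
      public static void main(String[] args) throws Exception {
        // e.g. hdfs://.../export-test/export-.../.hbase-snapshot/testExportWithTargetName (placeholder)
        Path exported = new Path(args[0]);
        FileSystem fs = exported.getFileSystem(new Configuration());
        System.out.println(".snapshotinfo present: " + fs.exists(new Path(exported, ".snapshotinfo")));
        System.out.println("data.manifest present: " + fs.exists(new Path(exported, "data.manifest")));
      }
    }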
2024-12-15T01:34:13,760 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: testExportWithTargetName 2024-12-15T01:34:13,760 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-15T01:34:13,761 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-15T01:34:13,762 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-778768832_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/snaptb0-testExportWithTargetName at hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/snaptb0-testExportWithTargetName 2024-12-15T01:34:13,762 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/snaptb0-testExportWithTargetName/.snapshotinfo 2024-12-15T01:34:13,762 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/snaptb0-testExportWithTargetName/data.manifest 2024-12-15T01:34:13,762 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-778768832_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226434743/.hbase-snapshot/testExportWithTargetName at hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226434743/.hbase-snapshot/testExportWithTargetName 2024-12-15T01:34:13,763 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226434743/.hbase-snapshot/testExportWithTargetName/.snapshotinfo 2024-12-15T01:34:13,763 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226434743/.hbase-snapshot/testExportWithTargetName/data.manifest 2024-12-15T01:34:13,775 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportWithTargetName 2024-12-15T01:34:13,778 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportWithTargetName 2024-12-15T01:34:13,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] procedure2.ProcedureExecutor(1098): Stored pid=23, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportWithTargetName 2024-12-15T01:34:13,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=23 2024-12-15T01:34:13,789 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734226453788"}]},"ts":"1734226453788"} 2024-12-15T01:34:13,791 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithTargetName, state=DISABLING in hbase:meta 2024-12-15T01:34:13,808 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set 
testtb-testExportWithTargetName to state=DISABLING 2024-12-15T01:34:13,810 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=24, ppid=23, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportWithTargetName}] 2024-12-15T01:34:13,816 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=25, ppid=24, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=4543aa4c02dba9f2c58b479a2039b7ca, UNASSIGN}, {pid=26, ppid=24, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=23b0667c1da493111bed50bdc7d62bf8, UNASSIGN}] 2024-12-15T01:34:13,818 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=26, ppid=24, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=23b0667c1da493111bed50bdc7d62bf8, UNASSIGN 2024-12-15T01:34:13,818 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=25, ppid=24, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=4543aa4c02dba9f2c58b479a2039b7ca, UNASSIGN 2024-12-15T01:34:13,819 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=25 updating hbase:meta row=4543aa4c02dba9f2c58b479a2039b7ca, regionState=CLOSING, regionLocation=66d575e28c61,39381,1734226422872 2024-12-15T01:34:13,819 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=26 updating hbase:meta row=23b0667c1da493111bed50bdc7d62bf8, regionState=CLOSING, regionLocation=66d575e28c61,35281,1734226422799 2024-12-15T01:34:13,821 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-15T01:34:13,822 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=27, ppid=25, state=RUNNABLE; CloseRegionProcedure 4543aa4c02dba9f2c58b479a2039b7ca, server=66d575e28c61,39381,1734226422872}] 2024-12-15T01:34:13,823 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-15T01:34:13,826 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=28, ppid=26, state=RUNNABLE; CloseRegionProcedure 23b0667c1da493111bed50bdc7d62bf8, server=66d575e28c61,35281,1734226422799}] 2024-12-15T01:34:13,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=23 2024-12-15T01:34:13,979 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 66d575e28c61,39381,1734226422872 2024-12-15T01:34:13,979 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 66d575e28c61,35281,1734226422799 2024-12-15T01:34:13,981 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] handler.UnassignRegionHandler(124): Close 23b0667c1da493111bed50bdc7d62bf8 2024-12-15T01:34:13,981 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] handler.UnassignRegionHandler(124): Close 4543aa4c02dba9f2c58b479a2039b7ca 2024-12-15T01:34:13,981 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] handler.UnassignRegionHandler(138): 
Unassign region: split region: false: evictCache: false 2024-12-15T01:34:13,981 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-15T01:34:13,982 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(1681): Closing 4543aa4c02dba9f2c58b479a2039b7ca, disabling compactions & flushes 2024-12-15T01:34:13,982 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] regionserver.HRegion(1681): Closing 23b0667c1da493111bed50bdc7d62bf8, disabling compactions & flushes 2024-12-15T01:34:13,982 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] regionserver.HRegion(1703): Closing region testtb-testExportWithTargetName,1,1734226432138.23b0667c1da493111bed50bdc7d62bf8. 2024-12-15T01:34:13,982 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(1703): Closing region testtb-testExportWithTargetName,,1734226432138.4543aa4c02dba9f2c58b479a2039b7ca. 2024-12-15T01:34:13,982 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithTargetName,1,1734226432138.23b0667c1da493111bed50bdc7d62bf8. 2024-12-15T01:34:13,982 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithTargetName,,1734226432138.4543aa4c02dba9f2c58b479a2039b7ca. 2024-12-15T01:34:13,982 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithTargetName,1,1734226432138.23b0667c1da493111bed50bdc7d62bf8. after waiting 0 ms 2024-12-15T01:34:13,982 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithTargetName,1,1734226432138.23b0667c1da493111bed50bdc7d62bf8. 2024-12-15T01:34:13,982 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithTargetName,,1734226432138.4543aa4c02dba9f2c58b479a2039b7ca. after waiting 0 ms 2024-12-15T01:34:13,982 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithTargetName,,1734226432138.4543aa4c02dba9f2c58b479a2039b7ca. 
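The DisableTableProcedure entries above walk each region of testtb-testExportWithTargetName through CLOSING (updates disabled, close lock acquired, close journal written) before the table is marked DISABLED in hbase:meta. From the client side the whole sequence is triggered by a single Admin call; a minimal sketch, assuming a standalone connection rather than the test's own plumbing:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DisableTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("testtb-testExportWithTargetName");
          admin.disableTable(tn);   // drives the region closes and DISABLING/DISABLED states above
          System.out.println("disabled: " + admin.isTableDisabled(tn));
        }
      }
    }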
2024-12-15T01:34:13,989 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithTargetName/23b0667c1da493111bed50bdc7d62bf8/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-15T01:34:13,989 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithTargetName/4543aa4c02dba9f2c58b479a2039b7ca/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-15T01:34:13,991 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-15T01:34:13,991 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-15T01:34:13,992 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(1922): Closed testtb-testExportWithTargetName,,1734226432138.4543aa4c02dba9f2c58b479a2039b7ca. 2024-12-15T01:34:13,992 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] regionserver.HRegion(1922): Closed testtb-testExportWithTargetName,1,1734226432138.23b0667c1da493111bed50bdc7d62bf8. 2024-12-15T01:34:13,992 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(1635): Region close journal for 4543aa4c02dba9f2c58b479a2039b7ca: 2024-12-15T01:34:13,992 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] regionserver.HRegion(1635): Region close journal for 23b0667c1da493111bed50bdc7d62bf8: 2024-12-15T01:34:13,994 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] handler.UnassignRegionHandler(170): Closed 4543aa4c02dba9f2c58b479a2039b7ca 2024-12-15T01:34:13,995 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=25 updating hbase:meta row=4543aa4c02dba9f2c58b479a2039b7ca, regionState=CLOSED 2024-12-15T01:34:13,995 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] handler.UnassignRegionHandler(170): Closed 23b0667c1da493111bed50bdc7d62bf8 2024-12-15T01:34:13,996 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=26 updating hbase:meta row=23b0667c1da493111bed50bdc7d62bf8, regionState=CLOSED 2024-12-15T01:34:14,002 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=27, resume processing ppid=25 2024-12-15T01:34:14,003 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=27, ppid=25, state=SUCCESS; CloseRegionProcedure 4543aa4c02dba9f2c58b479a2039b7ca, server=66d575e28c61,39381,1734226422872 in 174 msec 2024-12-15T01:34:14,003 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=28, resume processing ppid=26 2024-12-15T01:34:14,003 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=28, ppid=26, state=SUCCESS; CloseRegionProcedure 23b0667c1da493111bed50bdc7d62bf8, server=66d575e28c61,35281,1734226422799 in 175 msec 2024-12-15T01:34:14,004 INFO [PEWorker-1 {}] 
procedure2.ProcedureExecutor(1480): Finished pid=25, ppid=24, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=4543aa4c02dba9f2c58b479a2039b7ca, UNASSIGN in 186 msec 2024-12-15T01:34:14,005 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=26, resume processing ppid=24 2024-12-15T01:34:14,006 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=26, ppid=24, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=23b0667c1da493111bed50bdc7d62bf8, UNASSIGN in 187 msec 2024-12-15T01:34:14,009 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=24, resume processing ppid=23 2024-12-15T01:34:14,009 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=24, ppid=23, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportWithTargetName in 197 msec 2024-12-15T01:34:14,011 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734226454010"}]},"ts":"1734226454010"} 2024-12-15T01:34:14,012 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithTargetName, state=DISABLED in hbase:meta 2024-12-15T01:34:14,019 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithTargetName to state=DISABLED 2024-12-15T01:34:14,022 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=23, state=SUCCESS; DisableTableProcedure table=testtb-testExportWithTargetName in 240 msec 2024-12-15T01:34:14,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=23 2024-12-15T01:34:14,091 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportWithTargetName, procId: 23 completed 2024-12-15T01:34:14,095 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportWithTargetName 2024-12-15T01:34:14,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] procedure2.ProcedureExecutor(1098): Stored pid=29, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-15T01:34:14,101 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=29, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-15T01:34:14,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportWithTargetName 2024-12-15T01:34:14,103 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=29, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-15T01:34:14,106 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35281 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportWithTargetName 2024-12-15T01:34:14,110 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(133): ARCHIVING 
hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithTargetName/23b0667c1da493111bed50bdc7d62bf8 2024-12-15T01:34:14,110 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithTargetName/4543aa4c02dba9f2c58b479a2039b7ca 2024-12-15T01:34:14,114 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithTargetName/23b0667c1da493111bed50bdc7d62bf8/cf, FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithTargetName/23b0667c1da493111bed50bdc7d62bf8/recovered.edits] 2024-12-15T01:34:14,114 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithTargetName/4543aa4c02dba9f2c58b479a2039b7ca/cf, FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithTargetName/4543aa4c02dba9f2c58b479a2039b7ca/recovered.edits] 2024-12-15T01:34:14,122 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithTargetName/4543aa4c02dba9f2c58b479a2039b7ca/cf/00e559b396c34be0915cdbf94366f504 to hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/archive/data/default/testtb-testExportWithTargetName/4543aa4c02dba9f2c58b479a2039b7ca/cf/00e559b396c34be0915cdbf94366f504 2024-12-15T01:34:14,122 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithTargetName/23b0667c1da493111bed50bdc7d62bf8/cf/a8fd5a13b83a4553af94b39787ea0c70 to hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/archive/data/default/testtb-testExportWithTargetName/23b0667c1da493111bed50bdc7d62bf8/cf/a8fd5a13b83a4553af94b39787ea0c70 2024-12-15T01:34:14,124 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39381-0x1002752410c0003, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-15T01:34:14,124 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42663-0x1002752410c0001, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-15T01:34:14,124 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35281-0x1002752410c0002, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-15T01:34:14,124 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-15T01:34:14,127 DEBUG [HFileArchiver-5 {}] 
backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithTargetName/4543aa4c02dba9f2c58b479a2039b7ca/recovered.edits/9.seqid to hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/archive/data/default/testtb-testExportWithTargetName/4543aa4c02dba9f2c58b479a2039b7ca/recovered.edits/9.seqid 2024-12-15T01:34:14,127 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithTargetName/23b0667c1da493111bed50bdc7d62bf8/recovered.edits/9.seqid to hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/archive/data/default/testtb-testExportWithTargetName/23b0667c1da493111bed50bdc7d62bf8/recovered.edits/9.seqid 2024-12-15T01:34:14,128 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithTargetName/23b0667c1da493111bed50bdc7d62bf8 2024-12-15T01:34:14,128 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithTargetName/4543aa4c02dba9f2c58b479a2039b7ca 2024-12-15T01:34:14,128 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithTargetName regions 2024-12-15T01:34:14,131 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=29, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-15T01:34:14,132 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35281-0x1002752410c0002, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:34:14,132 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39381-0x1002752410c0003, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:34:14,132 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42663-0x1002752410c0001, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:34:14,133 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:34:14,133 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data null 2024-12-15T01:34:14,133 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data null 2024-12-15T01:34:14,133 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(135): Skipping permission cache refresh because writable data is empty 2024-12-15T01:34:14,133 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(135): Skipping permission cache refresh because writable data is empty 2024-12-15T01:34:14,133 DEBUG [zk-permission-watcher-pool-0 {}] 
access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data null 2024-12-15T01:34:14,133 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(135): Skipping permission cache refresh because writable data is empty 2024-12-15T01:34:14,133 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data null 2024-12-15T01:34:14,133 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(135): Skipping permission cache refresh because writable data is empty 2024-12-15T01:34:14,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=29 2024-12-15T01:34:14,136 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42663 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-12-15T01:34:14,139 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportWithTargetName from hbase:meta 2024-12-15T01:34:14,143 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportWithTargetName' descriptor. 2024-12-15T01:34:14,144 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=29, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-15T01:34:14,144 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportWithTargetName' from region states. 2024-12-15T01:34:14,145 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName,,1734226432138.4543aa4c02dba9f2c58b479a2039b7ca.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734226454144"}]},"ts":"9223372036854775807"} 2024-12-15T01:34:14,145 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName,1,1734226432138.23b0667c1da493111bed50bdc7d62bf8.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734226454144"}]},"ts":"9223372036854775807"} 2024-12-15T01:34:14,148 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-15T01:34:14,148 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 4543aa4c02dba9f2c58b479a2039b7ca, NAME => 'testtb-testExportWithTargetName,,1734226432138.4543aa4c02dba9f2c58b479a2039b7ca.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 23b0667c1da493111bed50bdc7d62bf8, NAME => 'testtb-testExportWithTargetName,1,1734226432138.23b0667c1da493111bed50bdc7d62bf8.', STARTKEY => '1', ENDKEY => ''}] 2024-12-15T01:34:14,148 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportWithTargetName' as deleted. 
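With the table disabled, cleanup proceeds: DeleteTableProcedure archives the region directories, removes the region rows and the table state from hbase:meta, and drops the descriptor, and the entries that follow show the two snapshots being deleted as well. The equivalent client calls, as a hedged sketch under the same standalone-connection assumption as above:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CleanupSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.deleteTable(TableName.valueOf("testtb-testExportWithTargetName")); // DeleteTableProcedure
          admin.deleteSnapshot("emptySnaptb0-testExportWithTargetName");
          admin.deleteSnapshot("snaptb0-testExportWithTargetName");
        }
      }
    }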
2024-12-15T01:34:14,148 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734226454148"}]},"ts":"9223372036854775807"} 2024-12-15T01:34:14,151 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportWithTargetName state from META 2024-12-15T01:34:14,162 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=29, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-15T01:34:14,163 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=29, state=SUCCESS; DeleteTableProcedure table=testtb-testExportWithTargetName in 66 msec 2024-12-15T01:34:14,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=29 2024-12-15T01:34:14,237 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportWithTargetName, procId: 29 completed 2024-12-15T01:34:14,253 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithTargetName" 2024-12-15T01:34:14,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testExportWithTargetName 2024-12-15T01:34:14,259 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithTargetName" 2024-12-15T01:34:14,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportWithTargetName 2024-12-15T01:34:14,293 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportWithTargetName Thread=777 (was 724) Potentially hanging thread: RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-778768832_22 at /127.0.0.1:52850 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #0 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.read1(BufferedReader.java:213) java.base@17.0.11/java.io.BufferedReader.read(BufferedReader.java:287) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.parseExecResult(Shell.java:1295) app//org.apache.hadoop.util.Shell.runCommand(Shell.java:1054) app//org.apache.hadoop.util.Shell.run(Shell.java:959) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:1282) app//org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor.launchContainer(DefaultContainerExecutor.java:349) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.launchContainer(ContainerLaunch.java:600) 
app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:388) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:105) java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-778768832_22 at /127.0.0.1:56780 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/66d575e28c61:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x613351a8-shared-pool-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44433 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44155 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (309020234) connection to localhost/127.0.0.1:33801 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x613351a8-shared-pool-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (309020234) connection to localhost/127.0.0.1:44155 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-778768832_22 at /127.0.0.1:60492 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1374091780_1 at /127.0.0.1:60468 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-4-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 53379) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1374091780_1 at /127.0.0.1:52832 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-1296 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: DeletionService #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x613351a8-shared-pool-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33801 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40017 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x613351a8-shared-pool-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: htable-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Container metrics 
unregistration java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: HFileArchiver-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: htable-pool-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/66d575e28c61:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=805 (was 783) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=475 (was 422) - SystemLoadAverage LEAK? -, ProcessCount=17 (was 11) - ProcessCount LEAK? 
-, AvailableMemoryMB=4256 (was 5961) 2024-12-15T01:34:14,293 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=777 is superior to 500 2024-12-15T01:34:14,310 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportWithResetTtl Thread=777, OpenFileDescriptor=805, MaxFileDescriptor=1048576, SystemLoadAverage=475, ProcessCount=17, AvailableMemoryMB=4255 2024-12-15T01:34:14,310 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=777 is superior to 500 2024-12-15T01:34:14,312 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-15T01:34:14,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] procedure2.ProcedureExecutor(1098): Stored pid=30, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportWithResetTtl 2024-12-15T01:34:14,314 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=30, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_PRE_OPERATION 2024-12-15T01:34:14,315 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T01:34:14,315 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithResetTtl" procId is: 30 2024-12-15T01:34:14,316 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=30, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-15T01:34:14,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-15T01:34:14,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741898_1074 (size=404) 2024-12-15T01:34:14,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741898_1074 (size=404) 2024-12-15T01:34:14,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741898_1074 (size=404) 2024-12-15T01:34:14,326 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => ad54665aa248be3194f85293da98468c, NAME => 'testtb-testExportWithResetTtl,,1734226454312.ad54665aa248be3194f85293da98468c.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL 
=> 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b 2024-12-15T01:34:14,326 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => 31ee6c3058cfcc8af5576ab0cafa36c1, NAME => 'testtb-testExportWithResetTtl,1,1734226454312.31ee6c3058cfcc8af5576ab0cafa36c1.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b 2024-12-15T01:34:14,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741899_1075 (size=65) 2024-12-15T01:34:14,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741899_1075 (size=65) 2024-12-15T01:34:14,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741900_1076 (size=65) 2024-12-15T01:34:14,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741900_1076 (size=65) 2024-12-15T01:34:14,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741900_1076 (size=65) 2024-12-15T01:34:14,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741899_1075 (size=65) 2024-12-15T01:34:14,337 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportWithResetTtl,,1734226454312.ad54665aa248be3194f85293da98468c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T01:34:14,337 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1681): Closing ad54665aa248be3194f85293da98468c, disabling compactions & flushes 2024-12-15T01:34:14,337 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportWithResetTtl,,1734226454312.ad54665aa248be3194f85293da98468c. 2024-12-15T01:34:14,337 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithResetTtl,,1734226454312.ad54665aa248be3194f85293da98468c. 2024-12-15T01:34:14,337 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithResetTtl,,1734226454312.ad54665aa248be3194f85293da98468c. 
after waiting 0 ms 2024-12-15T01:34:14,337 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithResetTtl,,1734226454312.ad54665aa248be3194f85293da98468c. 2024-12-15T01:34:14,337 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportWithResetTtl,,1734226454312.ad54665aa248be3194f85293da98468c. 2024-12-15T01:34:14,337 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1635): Region close journal for ad54665aa248be3194f85293da98468c: 2024-12-15T01:34:14,340 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportWithResetTtl,1,1734226454312.31ee6c3058cfcc8af5576ab0cafa36c1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T01:34:14,340 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1681): Closing 31ee6c3058cfcc8af5576ab0cafa36c1, disabling compactions & flushes 2024-12-15T01:34:14,340 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportWithResetTtl,1,1734226454312.31ee6c3058cfcc8af5576ab0cafa36c1. 2024-12-15T01:34:14,340 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithResetTtl,1,1734226454312.31ee6c3058cfcc8af5576ab0cafa36c1. 2024-12-15T01:34:14,340 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithResetTtl,1,1734226454312.31ee6c3058cfcc8af5576ab0cafa36c1. after waiting 0 ms 2024-12-15T01:34:14,340 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithResetTtl,1,1734226454312.31ee6c3058cfcc8af5576ab0cafa36c1. 2024-12-15T01:34:14,340 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportWithResetTtl,1,1734226454312.31ee6c3058cfcc8af5576ab0cafa36c1. 2024-12-15T01:34:14,340 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1635): Region close journal for 31ee6c3058cfcc8af5576ab0cafa36c1: 2024-12-15T01:34:14,342 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=30, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_ADD_TO_META 2024-12-15T01:34:14,342 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportWithResetTtl,,1734226454312.ad54665aa248be3194f85293da98468c.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1734226454342"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734226454342"}]},"ts":"1734226454342"} 2024-12-15T01:34:14,343 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportWithResetTtl,1,1734226454312.31ee6c3058cfcc8af5576ab0cafa36c1.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1734226454342"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734226454342"}]},"ts":"1734226454342"} 2024-12-15T01:34:14,346 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 
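
For illustration: the create request logged by the master above ('testtb-testExportWithResetTtl' with a single family 'cf', VERSIONS => '1', 64 KB blocks, and a split at row key '1' producing the two regions being initialized) corresponds roughly to the following HBase Admin API call. This is a sketch, not the test's own code; the configuration and connection handling are assumed.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateExportTestTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();  // picks up hbase-site.xml from the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("testtb-testExportWithResetTtl");
          // Single family 'cf', one version, 64 KB blocks -- mirrors the descriptor in the log.
          TableDescriptorBuilder tdb = TableDescriptorBuilder.newBuilder(tn)
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                  .setMaxVersions(1)
                  .setBlocksize(64 * 1024)
                  .build());
          // One split key '1' yields the two regions ('' -> '1' and '1' -> '') seen above.
          byte[][] splits = new byte[][] { Bytes.toBytes("1") };
          admin.createTable(tdb.build(), splits);
        }
      }
    }
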
2024-12-15T01:34:14,347 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=30, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-15T01:34:14,347 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734226454347"}]},"ts":"1734226454347"} 2024-12-15T01:34:14,350 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithResetTtl, state=ENABLING in hbase:meta 2024-12-15T01:34:14,369 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(202): Hosts are {66d575e28c61=0} racks are {/default-rack=0} 2024-12-15T01:34:14,371 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-15T01:34:14,371 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-15T01:34:14,371 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-15T01:34:14,371 INFO [PEWorker-1 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-15T01:34:14,371 INFO [PEWorker-1 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-15T01:34:14,371 INFO [PEWorker-1 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-15T01:34:14,371 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-15T01:34:14,371 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=31, ppid=30, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=ad54665aa248be3194f85293da98468c, ASSIGN}, {pid=32, ppid=30, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=31ee6c3058cfcc8af5576ab0cafa36c1, ASSIGN}] 2024-12-15T01:34:14,373 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=32, ppid=30, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=31ee6c3058cfcc8af5576ab0cafa36c1, ASSIGN 2024-12-15T01:34:14,374 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=31, ppid=30, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=ad54665aa248be3194f85293da98468c, ASSIGN 2024-12-15T01:34:14,375 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=32, ppid=30, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=31ee6c3058cfcc8af5576ab0cafa36c1, ASSIGN; state=OFFLINE, location=66d575e28c61,35281,1734226422799; forceNewPlan=false, retain=false 2024-12-15T01:34:14,375 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=31, ppid=30, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=ad54665aa248be3194f85293da98468c, ASSIGN; state=OFFLINE, location=66d575e28c61,39381,1734226422872; forceNewPlan=false, retain=false 2024-12-15T01:34:14,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to 
see if procedure is done pid=30 2024-12-15T01:34:14,525 INFO [66d575e28c61:40789 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-15T01:34:14,525 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=32 updating hbase:meta row=31ee6c3058cfcc8af5576ab0cafa36c1, regionState=OPENING, regionLocation=66d575e28c61,35281,1734226422799 2024-12-15T01:34:14,525 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=31 updating hbase:meta row=ad54665aa248be3194f85293da98468c, regionState=OPENING, regionLocation=66d575e28c61,39381,1734226422872 2024-12-15T01:34:14,527 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=33, ppid=31, state=RUNNABLE; OpenRegionProcedure ad54665aa248be3194f85293da98468c, server=66d575e28c61,39381,1734226422872}] 2024-12-15T01:34:14,529 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=34, ppid=32, state=RUNNABLE; OpenRegionProcedure 31ee6c3058cfcc8af5576ab0cafa36c1, server=66d575e28c61,35281,1734226422799}] 2024-12-15T01:34:14,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-15T01:34:14,680 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 66d575e28c61,39381,1734226422872 2024-12-15T01:34:14,681 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 66d575e28c61,35281,1734226422799 2024-12-15T01:34:14,685 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] handler.AssignRegionHandler(135): Open testtb-testExportWithResetTtl,,1734226454312.ad54665aa248be3194f85293da98468c. 2024-12-15T01:34:14,685 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(7285): Opening region: {ENCODED => ad54665aa248be3194f85293da98468c, NAME => 'testtb-testExportWithResetTtl,,1734226454312.ad54665aa248be3194f85293da98468c.', STARTKEY => '', ENDKEY => '1'} 2024-12-15T01:34:14,685 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] handler.AssignRegionHandler(135): Open testtb-testExportWithResetTtl,1,1734226454312.31ee6c3058cfcc8af5576ab0cafa36c1. 2024-12-15T01:34:14,686 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(7285): Opening region: {ENCODED => 31ee6c3058cfcc8af5576ab0cafa36c1, NAME => 'testtb-testExportWithResetTtl,1,1734226454312.31ee6c3058cfcc8af5576ab0cafa36c1.', STARTKEY => '1', ENDKEY => ''} 2024-12-15T01:34:14,686 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportWithResetTtl,,1734226454312.ad54665aa248be3194f85293da98468c. service=AccessControlService 2024-12-15T01:34:14,686 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-15T01:34:14,686 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportWithResetTtl,1,1734226454312.31ee6c3058cfcc8af5576ab0cafa36c1. 
service=AccessControlService 2024-12-15T01:34:14,686 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithResetTtl ad54665aa248be3194f85293da98468c 2024-12-15T01:34:14,686 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(894): Instantiated testtb-testExportWithResetTtl,,1734226454312.ad54665aa248be3194f85293da98468c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T01:34:14,686 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-15T01:34:14,687 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(7327): checking encryption for ad54665aa248be3194f85293da98468c 2024-12-15T01:34:14,687 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(7330): checking classloading for ad54665aa248be3194f85293da98468c 2024-12-15T01:34:14,687 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithResetTtl 31ee6c3058cfcc8af5576ab0cafa36c1 2024-12-15T01:34:14,687 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(894): Instantiated testtb-testExportWithResetTtl,1,1734226454312.31ee6c3058cfcc8af5576ab0cafa36c1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T01:34:14,687 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(7327): checking encryption for 31ee6c3058cfcc8af5576ab0cafa36c1 2024-12-15T01:34:14,687 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(7330): checking classloading for 31ee6c3058cfcc8af5576ab0cafa36c1 2024-12-15T01:34:14,689 INFO [StoreOpener-ad54665aa248be3194f85293da98468c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region ad54665aa248be3194f85293da98468c 2024-12-15T01:34:14,689 INFO [StoreOpener-31ee6c3058cfcc8af5576ab0cafa36c1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 31ee6c3058cfcc8af5576ab0cafa36c1 2024-12-15T01:34:14,691 INFO [StoreOpener-ad54665aa248be3194f85293da98468c-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, 
compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ad54665aa248be3194f85293da98468c columnFamilyName cf 2024-12-15T01:34:14,691 DEBUG [StoreOpener-ad54665aa248be3194f85293da98468c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T01:34:14,691 INFO [StoreOpener-31ee6c3058cfcc8af5576ab0cafa36c1-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 31ee6c3058cfcc8af5576ab0cafa36c1 columnFamilyName cf 2024-12-15T01:34:14,692 DEBUG [StoreOpener-31ee6c3058cfcc8af5576ab0cafa36c1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T01:34:14,692 INFO [StoreOpener-ad54665aa248be3194f85293da98468c-1 {}] regionserver.HStore(327): Store=ad54665aa248be3194f85293da98468c/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-15T01:34:14,692 INFO [StoreOpener-31ee6c3058cfcc8af5576ab0cafa36c1-1 {}] regionserver.HStore(327): Store=31ee6c3058cfcc8af5576ab0cafa36c1/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-15T01:34:14,693 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithResetTtl/ad54665aa248be3194f85293da98468c 2024-12-15T01:34:14,693 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithResetTtl/ad54665aa248be3194f85293da98468c 2024-12-15T01:34:14,694 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithResetTtl/31ee6c3058cfcc8af5576ab0cafa36c1 2024-12-15T01:34:14,694 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithResetTtl/31ee6c3058cfcc8af5576ab0cafa36c1 
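
The HStore lines above report the per-family settings the stores were opened with (DefaultMemStore, encoding=NONE, compression=NONE, 64 KB blocks). A client can read the same settings back from the master; a minimal sketch, assuming an already-open Connection is passed in:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.util.Bytes;

    public class DescribeCf {
      // Prints the 'cf' settings that the store openers log when the regions come online.
      static void describe(Connection conn) throws IOException {
        try (Admin admin = conn.getAdmin()) {
          TableDescriptor td = admin.getDescriptor(TableName.valueOf("testtb-testExportWithResetTtl"));
          ColumnFamilyDescriptor cf = td.getColumnFamily(Bytes.toBytes("cf"));
          System.out.println("versions=" + cf.getMaxVersions()
              + " blocksize=" + cf.getBlocksize()
              + " compression=" + cf.getCompressionType()
              + " encoding=" + cf.getDataBlockEncoding());
        }
      }
    }
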
2024-12-15T01:34:14,696 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(1085): writing seq id for ad54665aa248be3194f85293da98468c 2024-12-15T01:34:14,696 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(1085): writing seq id for 31ee6c3058cfcc8af5576ab0cafa36c1 2024-12-15T01:34:14,699 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithResetTtl/31ee6c3058cfcc8af5576ab0cafa36c1/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-15T01:34:14,699 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithResetTtl/ad54665aa248be3194f85293da98468c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-15T01:34:14,700 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(1102): Opened ad54665aa248be3194f85293da98468c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70186194, jitterRate=0.04585579037666321}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-15T01:34:14,700 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(1102): Opened 31ee6c3058cfcc8af5576ab0cafa36c1; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68370923, jitterRate=0.01880614459514618}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-15T01:34:14,701 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(1001): Region open journal for 31ee6c3058cfcc8af5576ab0cafa36c1: 2024-12-15T01:34:14,701 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(1001): Region open journal for ad54665aa248be3194f85293da98468c: 2024-12-15T01:34:14,702 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportWithResetTtl,,1734226454312.ad54665aa248be3194f85293da98468c., pid=33, masterSystemTime=1734226454680 2024-12-15T01:34:14,702 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportWithResetTtl,1,1734226454312.31ee6c3058cfcc8af5576ab0cafa36c1., pid=34, masterSystemTime=1734226454681 2024-12-15T01:34:14,705 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportWithResetTtl,,1734226454312.ad54665aa248be3194f85293da98468c. 2024-12-15T01:34:14,706 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] handler.AssignRegionHandler(164): Opened testtb-testExportWithResetTtl,,1734226454312.ad54665aa248be3194f85293da98468c. 
2024-12-15T01:34:14,706 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=31 updating hbase:meta row=ad54665aa248be3194f85293da98468c, regionState=OPEN, openSeqNum=2, regionLocation=66d575e28c61,39381,1734226422872 2024-12-15T01:34:14,708 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportWithResetTtl,1,1734226454312.31ee6c3058cfcc8af5576ab0cafa36c1. 2024-12-15T01:34:14,708 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] handler.AssignRegionHandler(164): Opened testtb-testExportWithResetTtl,1,1734226454312.31ee6c3058cfcc8af5576ab0cafa36c1. 2024-12-15T01:34:14,710 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=32 updating hbase:meta row=31ee6c3058cfcc8af5576ab0cafa36c1, regionState=OPEN, openSeqNum=2, regionLocation=66d575e28c61,35281,1734226422799 2024-12-15T01:34:14,713 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=33, resume processing ppid=31 2024-12-15T01:34:14,713 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=33, ppid=31, state=SUCCESS; OpenRegionProcedure ad54665aa248be3194f85293da98468c, server=66d575e28c61,39381,1734226422872 in 182 msec 2024-12-15T01:34:14,715 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=34, resume processing ppid=32 2024-12-15T01:34:14,715 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=31, ppid=30, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=ad54665aa248be3194f85293da98468c, ASSIGN in 342 msec 2024-12-15T01:34:14,716 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=34, ppid=32, state=SUCCESS; OpenRegionProcedure 31ee6c3058cfcc8af5576ab0cafa36c1, server=66d575e28c61,35281,1734226422799 in 183 msec 2024-12-15T01:34:14,718 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=32, resume processing ppid=30 2024-12-15T01:34:14,719 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=32, ppid=30, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=31ee6c3058cfcc8af5576ab0cafa36c1, ASSIGN in 344 msec 2024-12-15T01:34:14,719 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=30, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-15T01:34:14,720 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734226454720"}]},"ts":"1734226454720"} 2024-12-15T01:34:14,722 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithResetTtl, state=ENABLED in hbase:meta 2024-12-15T01:34:14,734 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=30, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_POST_OPERATION 2024-12-15T01:34:14,734 DEBUG [PEWorker-3 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportWithResetTtl jenkins: RWXCA 2024-12-15T01:34:14,737 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35281 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: 
RWXCA] 2024-12-15T01:34:14,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39381-0x1002752410c0003, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:34:14,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35281-0x1002752410c0002, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:34:14,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42663-0x1002752410c0001, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:34:14,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:34:14,752 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-15T01:34:14,752 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-15T01:34:14,752 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-15T01:34:14,753 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-15T01:34:14,756 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=30, state=SUCCESS; CreateTableProcedure table=testtb-testExportWithResetTtl in 441 msec 2024-12-15T01:34:14,876 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithResetTtl' 2024-12-15T01:34:14,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-15T01:34:14,920 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportWithResetTtl, procId: 30 completed 2024-12-15T01:34:14,921 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testExportWithResetTtl get assigned. Timeout = 60000ms 2024-12-15T01:34:14,921 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-15T01:34:14,929 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testExportWithResetTtl assigned to meta. Checking AM states. 
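
Once CreateTableProcedure pid=30 finishes, the test blocks until every region of the new table is assigned (the HBaseTestingUtility wait logged here). Outside the test harness, an equivalent wait can be built on Admin#isTableAvailable; a sketch, assuming an open Connection and a caller-chosen timeout:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;

    public class WaitForTable {
      // Polls until the master reports the table's regions as available, or the deadline passes.
      static void waitUntilAvailable(Connection conn, String table, long timeoutMs)
          throws IOException, InterruptedException {
        TableName tn = TableName.valueOf(table);
        long deadline = System.currentTimeMillis() + timeoutMs;
        try (Admin admin = conn.getAdmin()) {
          while (!admin.isTableAvailable(tn)) {
            if (System.currentTimeMillis() > deadline) {
              throw new IOException("Timed out waiting for regions of " + table);
            }
            Thread.sleep(200);
          }
        }
      }
    }
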
2024-12-15T01:34:14,929 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-15T01:34:14,929 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testExportWithResetTtl assigned. 2024-12-15T01:34:14,934 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-15T01:34:14,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734226454934 (current time:1734226454934). 2024-12-15T01:34:14,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-15T01:34:14,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testExportWithResetTtl VERSION not specified, setting to 2 2024-12-15T01:34:14,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-15T01:34:14,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x36844d09 to 127.0.0.1:64248 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@343561a7 2024-12-15T01:34:14,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4f2c6a22, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T01:34:14,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T01:34:14,953 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42968, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T01:34:14,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x36844d09 to 127.0.0.1:64248 2024-12-15T01:34:14,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T01:34:14,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x62845027 to 127.0.0.1:64248 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7d233d89 2024-12-15T01:34:14,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@51f22575, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T01:34:14,979 DEBUG [hconnection-0x4222ebb8-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for 
service=ClientService, sasl=false 2024-12-15T01:34:14,981 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42984, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T01:34:14,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T01:34:14,985 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55652, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T01:34:14,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x62845027 to 127.0.0.1:64248 2024-12-15T01:34:14,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T01:34:14,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-15T01:34:14,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-15T01:34:14,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] procedure2.ProcedureExecutor(1098): Stored pid=35, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-15T01:34:14,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 35 2024-12-15T01:34:14,990 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-15T01:34:14,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-12-15T01:34:14,993 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-15T01:34:14,996 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-15T01:34:15,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741901_1077 (size=161) 2024-12-15T01:34:15,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741901_1077 (size=161) 2024-12-15T01:34:15,014 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741901_1077 (size=161) 2024-12-15T01:34:15,015 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-15T01:34:15,015 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=36, ppid=35, state=RUNNABLE; SnapshotRegionProcedure ad54665aa248be3194f85293da98468c}, {pid=37, ppid=35, state=RUNNABLE; SnapshotRegionProcedure 31ee6c3058cfcc8af5576ab0cafa36c1}] 2024-12-15T01:34:15,016 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=37, ppid=35, state=RUNNABLE; SnapshotRegionProcedure 31ee6c3058cfcc8af5576ab0cafa36c1 2024-12-15T01:34:15,016 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=36, ppid=35, state=RUNNABLE; SnapshotRegionProcedure ad54665aa248be3194f85293da98468c 2024-12-15T01:34:15,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-12-15T01:34:15,167 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 66d575e28c61,39381,1734226422872 2024-12-15T01:34:15,167 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 66d575e28c61,35281,1734226422799 2024-12-15T01:34:15,168 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35281 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=37 2024-12-15T01:34:15,168 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39381 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=36 2024-12-15T01:34:15,169 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,1,1734226454312.31ee6c3058cfcc8af5576ab0cafa36c1. 2024-12-15T01:34:15,169 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,,1734226454312.ad54665aa248be3194f85293da98468c. 2024-12-15T01:34:15,169 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] regionserver.HRegion(2538): Flush status journal for ad54665aa248be3194f85293da98468c: 2024-12-15T01:34:15,169 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] regionserver.HRegion(2538): Flush status journal for 31ee6c3058cfcc8af5576ab0cafa36c1: 2024-12-15T01:34:15,169 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,,1734226454312.ad54665aa248be3194f85293da98468c. for emptySnaptb0-testExportWithResetTtl completed. 
2024-12-15T01:34:15,169 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,1,1734226454312.31ee6c3058cfcc8af5576ab0cafa36c1. for emptySnaptb0-testExportWithResetTtl completed. 2024-12-15T01:34:15,169 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,1,1734226454312.31ee6c3058cfcc8af5576ab0cafa36c1.' region-info for snapshot=emptySnaptb0-testExportWithResetTtl 2024-12-15T01:34:15,169 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,,1734226454312.ad54665aa248be3194f85293da98468c.' region-info for snapshot=emptySnaptb0-testExportWithResetTtl 2024-12-15T01:34:15,169 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-15T01:34:15,169 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-15T01:34:15,169 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-15T01:34:15,169 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-15T01:34:15,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741903_1079 (size=68) 2024-12-15T01:34:15,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741903_1079 (size=68) 2024-12-15T01:34:15,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741903_1079 (size=68) 2024-12-15T01:34:15,177 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,1,1734226454312.31ee6c3058cfcc8af5576ab0cafa36c1. 
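
The snapshot being assembled here ({ ss=emptySnaptb0-testExportWithResetTtl ... type=FLUSH ttl=0 }) is the kind a client requests through Admin#snapshot; for an enabled table this takes a flush-type snapshot like the one in the log. A minimal sketch, again assuming an open Connection:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;

    public class TakeSnapshot {
      // Requests a snapshot of the (enabled) table; the master runs the SnapshotProcedure
      // seen in the log and this call returns once the snapshot is complete.
      static void snapshot(Connection conn, String snapshotName, String table) throws IOException {
        try (Admin admin = conn.getAdmin()) {
          admin.snapshot(snapshotName, TableName.valueOf(table));
        }
      }
    }

For example, snapshot(conn, "emptySnaptb0-testExportWithResetTtl", "testtb-testExportWithResetTtl") would issue the request recorded above.
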
2024-12-15T01:34:15,177 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=37 2024-12-15T01:34:15,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.HMaster(4106): Remote procedure done, pid=37 2024-12-15T01:34:15,178 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithResetTtl on region 31ee6c3058cfcc8af5576ab0cafa36c1 2024-12-15T01:34:15,178 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=37, ppid=35, state=RUNNABLE; SnapshotRegionProcedure 31ee6c3058cfcc8af5576ab0cafa36c1 2024-12-15T01:34:15,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741902_1078 (size=68) 2024-12-15T01:34:15,181 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=37, ppid=35, state=SUCCESS; SnapshotRegionProcedure 31ee6c3058cfcc8af5576ab0cafa36c1 in 164 msec 2024-12-15T01:34:15,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741902_1078 (size=68) 2024-12-15T01:34:15,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741902_1078 (size=68) 2024-12-15T01:34:15,183 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,,1734226454312.ad54665aa248be3194f85293da98468c. 2024-12-15T01:34:15,183 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=36 2024-12-15T01:34:15,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.HMaster(4106): Remote procedure done, pid=36 2024-12-15T01:34:15,183 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithResetTtl on region ad54665aa248be3194f85293da98468c 2024-12-15T01:34:15,183 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=36, ppid=35, state=RUNNABLE; SnapshotRegionProcedure ad54665aa248be3194f85293da98468c 2024-12-15T01:34:15,187 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=36, resume processing ppid=35 2024-12-15T01:34:15,187 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=36, ppid=35, state=SUCCESS; SnapshotRegionProcedure ad54665aa248be3194f85293da98468c in 169 msec 2024-12-15T01:34:15,187 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-15T01:34:15,188 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 
execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-15T01:34:15,188 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-15T01:34:15,188 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithResetTtl 2024-12-15T01:34:15,189 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithResetTtl 2024-12-15T01:34:15,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741904_1080 (size=543) 2024-12-15T01:34:15,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741904_1080 (size=543) 2024-12-15T01:34:15,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741904_1080 (size=543) 2024-12-15T01:34:15,205 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-15T01:34:15,211 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-15T01:34:15,212 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithResetTtl to hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/emptySnaptb0-testExportWithResetTtl 2024-12-15T01:34:15,215 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-15T01:34:15,215 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 35 2024-12-15T01:34:15,217 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=35, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } in 227 msec 2024-12-15T01:34:15,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-12-15T01:34:15,295 INFO [Time-limited test {}] 
client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithResetTtl, procId: 35 completed 2024-12-15T01:34:15,304 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] regionserver.HRegion(8254): writing data to region testtb-testExportWithResetTtl,,1734226454312.ad54665aa248be3194f85293da98468c. with WAL disabled. Data may be lost in the event of a crash. 2024-12-15T01:34:15,306 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35281 {}] regionserver.HRegion(8254): writing data to region testtb-testExportWithResetTtl,1,1734226454312.31ee6c3058cfcc8af5576ab0cafa36c1. with WAL disabled. Data may be lost in the event of a crash. 2024-12-15T01:34:15,311 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportWithResetTtl 2024-12-15T01:34:15,311 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportWithResetTtl,,1734226454312.ad54665aa248be3194f85293da98468c. 2024-12-15T01:34:15,311 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-15T01:34:15,328 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-15T01:34:15,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734226455328 (current time:1734226455328). 2024-12-15T01:34:15,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-15T01:34:15,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportWithResetTtl VERSION not specified, setting to 2 2024-12-15T01:34:15,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-15T01:34:15,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x26557c32 to 127.0.0.1:64248 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@737f2c6b 2024-12-15T01:34:15,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4d5326a1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T01:34:15,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T01:34:15,344 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42994, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T01:34:15,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x26557c32 to 127.0.0.1:64248 2024-12-15T01:34:15,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] 
ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T01:34:15,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x19259f7f to 127.0.0.1:64248 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@146912b0 2024-12-15T01:34:15,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@182aae30, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T01:34:15,363 DEBUG [hconnection-0x448ac317-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T01:34:15,364 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42996, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T01:34:15,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T01:34:15,367 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55658, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T01:34:15,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x19259f7f to 127.0.0.1:64248 2024-12-15T01:34:15,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T01:34:15,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-15T01:34:15,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 
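
The "writing data ... with WAL disabled" warnings above are what the region server emits when a client loads rows with durability turned off on the Put. The test's exact loader is not shown in this log; a hedged sketch of such a write (matching the cf:q column seen in the flush output below), assuming an open Connection:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class LoadRowSkipWal {
      // A single put with SKIP_WAL durability; the region server logs the
      // "WAL disabled ... Data may be lost" warning seen above for such writes.
      static void putOne(Connection conn, String table, String row) throws IOException {
        try (Table t = conn.getTable(TableName.valueOf(table))) {
          Put p = new Put(Bytes.toBytes(row));
          p.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
          p.setDurability(Durability.SKIP_WAL);
          t.put(p);
        }
      }
    }
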
2024-12-15T01:34:15,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] procedure2.ProcedureExecutor(1098): Stored pid=38, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-15T01:34:15,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 38 2024-12-15T01:34:15,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-12-15T01:34:15,373 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-15T01:34:15,374 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-15T01:34:15,377 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-15T01:34:15,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741905_1081 (size=156) 2024-12-15T01:34:15,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741905_1081 (size=156) 2024-12-15T01:34:15,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741905_1081 (size=156) 2024-12-15T01:34:15,388 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-15T01:34:15,388 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=39, ppid=38, state=RUNNABLE; SnapshotRegionProcedure ad54665aa248be3194f85293da98468c}, {pid=40, ppid=38, state=RUNNABLE; SnapshotRegionProcedure 31ee6c3058cfcc8af5576ab0cafa36c1}] 2024-12-15T01:34:15,390 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=40, ppid=38, state=RUNNABLE; SnapshotRegionProcedure 31ee6c3058cfcc8af5576ab0cafa36c1 2024-12-15T01:34:15,390 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=39, ppid=38, state=RUNNABLE; SnapshotRegionProcedure ad54665aa248be3194f85293da98468c 2024-12-15T01:34:15,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 
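[editor's note] The entries above show the master accepting a FLUSH-type snapshot request for testtb-testExportWithResetTtl, storing SnapshotProcedure pid=38, and the client repeatedly asking whether the procedure is done. A minimal client-side sketch of issuing that request through the HBase Admin API is below; the snapshot and table names come from the log, while the class name and the assumption that hbase-site.xml is on the classpath are mine. This is a sketch, not the test's actual code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class SnapshotExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml on the classpath
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // FLUSH-type snapshot: regions flush their memstores, then store files are referenced.
                admin.snapshot("snaptb0-testExportWithResetTtl",
                        TableName.valueOf("testtb-testExportWithResetTtl"),
                        SnapshotType.FLUSH);
            }
        }
    }

The blocking admin.snapshot call corresponds to the "Checking to see if procedure is done pid=38" polling visible in the log.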
2024-12-15T01:34:15,541 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 66d575e28c61,35281,1734226422799 2024-12-15T01:34:15,541 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 66d575e28c61,39381,1734226422872 2024-12-15T01:34:15,542 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39381 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=39 2024-12-15T01:34:15,542 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35281 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=40 2024-12-15T01:34:15,542 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,,1734226454312.ad54665aa248be3194f85293da98468c. 2024-12-15T01:34:15,542 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,1,1734226454312.31ee6c3058cfcc8af5576ab0cafa36c1. 2024-12-15T01:34:15,542 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.HRegion(2837): Flushing ad54665aa248be3194f85293da98468c 1/1 column families, dataSize=199 B heapSize=688 B 2024-12-15T01:34:15,542 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.HRegion(2837): Flushing 31ee6c3058cfcc8af5576ab0cafa36c1 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-12-15T01:34:15,560 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithResetTtl/ad54665aa248be3194f85293da98468c/.tmp/cf/8f8512e5fad74358b6b68563875444c4 is 71, key is 079e303d92f96bfc12d364bb5c10e528/cf:q/1734226455304/Put/seqid=0 2024-12-15T01:34:15,560 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithResetTtl/31ee6c3058cfcc8af5576ab0cafa36c1/.tmp/cf/bb9eaba06b1d416486a4381d72beef8a is 71, key is 1538d0864599241c210d2d9f3868719a/cf:q/1734226455306/Put/seqid=0 2024-12-15T01:34:15,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741906_1082 (size=5288) 2024-12-15T01:34:15,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741906_1082 (size=5288) 2024-12-15T01:34:15,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741906_1082 (size=5288) 2024-12-15T01:34:15,579 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=199 B at sequenceid=6 (bloomFilter=true), 
to=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithResetTtl/ad54665aa248be3194f85293da98468c/.tmp/cf/8f8512e5fad74358b6b68563875444c4 2024-12-15T01:34:15,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741907_1083 (size=8326) 2024-12-15T01:34:15,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741907_1083 (size=8326) 2024-12-15T01:34:15,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741907_1083 (size=8326) 2024-12-15T01:34:15,584 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.06 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithResetTtl/31ee6c3058cfcc8af5576ab0cafa36c1/.tmp/cf/bb9eaba06b1d416486a4381d72beef8a 2024-12-15T01:34:15,588 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithResetTtl/ad54665aa248be3194f85293da98468c/.tmp/cf/8f8512e5fad74358b6b68563875444c4 as hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithResetTtl/ad54665aa248be3194f85293da98468c/cf/8f8512e5fad74358b6b68563875444c4 2024-12-15T01:34:15,597 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithResetTtl/31ee6c3058cfcc8af5576ab0cafa36c1/.tmp/cf/bb9eaba06b1d416486a4381d72beef8a as hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithResetTtl/31ee6c3058cfcc8af5576ab0cafa36c1/cf/bb9eaba06b1d416486a4381d72beef8a 2024-12-15T01:34:15,601 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithResetTtl/ad54665aa248be3194f85293da98468c/cf/8f8512e5fad74358b6b68563875444c4, entries=3, sequenceid=6, filesize=5.2 K 2024-12-15T01:34:15,602 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.HRegion(3040): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for ad54665aa248be3194f85293da98468c in 60ms, sequenceid=6, compaction requested=false 2024-12-15T01:34:15,602 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.HRegion(2538): Flush status journal for ad54665aa248be3194f85293da98468c: 2024-12-15T01:34:15,602 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,,1734226454312.ad54665aa248be3194f85293da98468c. 
for snaptb0-testExportWithResetTtl completed. 2024-12-15T01:34:15,603 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,,1734226454312.ad54665aa248be3194f85293da98468c.' region-info for snapshot=snaptb0-testExportWithResetTtl 2024-12-15T01:34:15,603 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-15T01:34:15,603 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithResetTtl/ad54665aa248be3194f85293da98468c/cf/8f8512e5fad74358b6b68563875444c4] hfiles 2024-12-15T01:34:15,603 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithResetTtl/ad54665aa248be3194f85293da98468c/cf/8f8512e5fad74358b6b68563875444c4 for snapshot=snaptb0-testExportWithResetTtl 2024-12-15T01:34:15,605 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithResetTtl/31ee6c3058cfcc8af5576ab0cafa36c1/cf/bb9eaba06b1d416486a4381d72beef8a, entries=47, sequenceid=6, filesize=8.1 K 2024-12-15T01:34:15,607 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.HRegion(3040): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for 31ee6c3058cfcc8af5576ab0cafa36c1 in 64ms, sequenceid=6, compaction requested=false 2024-12-15T01:34:15,607 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.HRegion(2538): Flush status journal for 31ee6c3058cfcc8af5576ab0cafa36c1: 2024-12-15T01:34:15,607 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,1,1734226454312.31ee6c3058cfcc8af5576ab0cafa36c1. for snaptb0-testExportWithResetTtl completed. 2024-12-15T01:34:15,607 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,1,1734226454312.31ee6c3058cfcc8af5576ab0cafa36c1.' 
region-info for snapshot=snaptb0-testExportWithResetTtl 2024-12-15T01:34:15,607 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-15T01:34:15,607 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithResetTtl/31ee6c3058cfcc8af5576ab0cafa36c1/cf/bb9eaba06b1d416486a4381d72beef8a] hfiles 2024-12-15T01:34:15,607 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithResetTtl/31ee6c3058cfcc8af5576ab0cafa36c1/cf/bb9eaba06b1d416486a4381d72beef8a for snapshot=snaptb0-testExportWithResetTtl 2024-12-15T01:34:15,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741908_1084 (size=107) 2024-12-15T01:34:15,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741908_1084 (size=107) 2024-12-15T01:34:15,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741908_1084 (size=107) 2024-12-15T01:34:15,621 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,,1734226454312.ad54665aa248be3194f85293da98468c. 
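[editor's note] Because this is a FLUSH-type snapshot, each SnapshotRegionProcedure above first flushes the region's memstore (the DefaultStoreFlusher lines) so the manifest only references on-disk hfiles. The same flush can be requested explicitly through the Admin API; a small sketch under the same assumed connection setup, with the table name taken from the log:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushExample {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // Push memstore contents of every region of the table out to hfiles; a FLUSH
                // snapshot performs this same step per region before adding file references.
                admin.flush(TableName.valueOf("testtb-testExportWithResetTtl"));
            }
        }
    }
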
2024-12-15T01:34:15,621 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=39 2024-12-15T01:34:15,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.HMaster(4106): Remote procedure done, pid=39 2024-12-15T01:34:15,622 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithResetTtl on region ad54665aa248be3194f85293da98468c 2024-12-15T01:34:15,622 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=39, ppid=38, state=RUNNABLE; SnapshotRegionProcedure ad54665aa248be3194f85293da98468c 2024-12-15T01:34:15,625 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=39, ppid=38, state=SUCCESS; SnapshotRegionProcedure ad54665aa248be3194f85293da98468c in 235 msec 2024-12-15T01:34:15,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741909_1085 (size=107) 2024-12-15T01:34:15,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741909_1085 (size=107) 2024-12-15T01:34:15,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741909_1085 (size=107) 2024-12-15T01:34:15,630 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,1,1734226454312.31ee6c3058cfcc8af5576ab0cafa36c1. 2024-12-15T01:34:15,630 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=40 2024-12-15T01:34:15,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.HMaster(4106): Remote procedure done, pid=40 2024-12-15T01:34:15,630 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithResetTtl on region 31ee6c3058cfcc8af5576ab0cafa36c1 2024-12-15T01:34:15,630 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=40, ppid=38, state=RUNNABLE; SnapshotRegionProcedure 31ee6c3058cfcc8af5576ab0cafa36c1 2024-12-15T01:34:15,633 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=40, resume processing ppid=38 2024-12-15T01:34:15,633 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-15T01:34:15,633 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=40, ppid=38, state=SUCCESS; SnapshotRegionProcedure 31ee6c3058cfcc8af5576ab0cafa36c1 in 243 msec 2024-12-15T01:34:15,634 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute 
state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-15T01:34:15,635 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-15T01:34:15,635 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithResetTtl 2024-12-15T01:34:15,636 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/.tmp/snaptb0-testExportWithResetTtl 2024-12-15T01:34:15,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741910_1086 (size=621) 2024-12-15T01:34:15,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741910_1086 (size=621) 2024-12-15T01:34:15,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741910_1086 (size=621) 2024-12-15T01:34:15,653 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-15T01:34:15,660 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-15T01:34:15,661 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/.tmp/snaptb0-testExportWithResetTtl to hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/snaptb0-testExportWithResetTtl 2024-12-15T01:34:15,666 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-15T01:34:15,666 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 38 2024-12-15T01:34:15,669 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=38, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } in 296 msec 2024-12-15T01:34:15,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-12-15T01:34:15,676 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table 
Name: default:testtb-testExportWithResetTtl, procId: 38 completed 2024-12-15T01:34:15,678 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-15T01:34:15,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] procedure2.ProcedureExecutor(1098): Stored pid=41, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testExportWithResetTtl 2024-12-15T01:34:15,685 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_PRE_OPERATION 2024-12-15T01:34:15,685 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T01:34:15,685 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testExportWithResetTtl" procId is: 41 2024-12-15T01:34:15,687 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-15T01:34:15,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-15T01:34:15,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741911_1087 (size=397) 2024-12-15T01:34:15,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741911_1087 (size=397) 2024-12-15T01:34:15,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741911_1087 (size=397) 2024-12-15T01:34:15,726 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 41dbec9b53201b07e62c7c16b8e37cc5, NAME => 'testExportWithResetTtl,,1734226455678.41dbec9b53201b07e62c7c16b8e37cc5.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b 2024-12-15T01:34:15,726 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => e6b4d731ac38c5f7608d8bba9e49c311, NAME => 
'testExportWithResetTtl,1,1734226455678.e6b4d731ac38c5f7608d8bba9e49c311.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b 2024-12-15T01:34:15,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741912_1088 (size=58) 2024-12-15T01:34:15,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741912_1088 (size=58) 2024-12-15T01:34:15,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741912_1088 (size=58) 2024-12-15T01:34:15,746 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(894): Instantiated testExportWithResetTtl,,1734226455678.41dbec9b53201b07e62c7c16b8e37cc5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T01:34:15,746 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1681): Closing 41dbec9b53201b07e62c7c16b8e37cc5, disabling compactions & flushes 2024-12-15T01:34:15,746 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1703): Closing region testExportWithResetTtl,,1734226455678.41dbec9b53201b07e62c7c16b8e37cc5. 2024-12-15T01:34:15,747 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportWithResetTtl,,1734226455678.41dbec9b53201b07e62c7c16b8e37cc5. 2024-12-15T01:34:15,747 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testExportWithResetTtl,,1734226455678.41dbec9b53201b07e62c7c16b8e37cc5. after waiting 0 ms 2024-12-15T01:34:15,747 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testExportWithResetTtl,,1734226455678.41dbec9b53201b07e62c7c16b8e37cc5. 2024-12-15T01:34:15,747 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1922): Closed testExportWithResetTtl,,1734226455678.41dbec9b53201b07e62c7c16b8e37cc5. 
2024-12-15T01:34:15,747 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1635): Region close journal for 41dbec9b53201b07e62c7c16b8e37cc5: 2024-12-15T01:34:15,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741913_1089 (size=58) 2024-12-15T01:34:15,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741913_1089 (size=58) 2024-12-15T01:34:15,757 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(894): Instantiated testExportWithResetTtl,1,1734226455678.e6b4d731ac38c5f7608d8bba9e49c311.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T01:34:15,757 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1681): Closing e6b4d731ac38c5f7608d8bba9e49c311, disabling compactions & flushes 2024-12-15T01:34:15,757 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1703): Closing region testExportWithResetTtl,1,1734226455678.e6b4d731ac38c5f7608d8bba9e49c311. 2024-12-15T01:34:15,757 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportWithResetTtl,1,1734226455678.e6b4d731ac38c5f7608d8bba9e49c311. 2024-12-15T01:34:15,757 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testExportWithResetTtl,1,1734226455678.e6b4d731ac38c5f7608d8bba9e49c311. after waiting 0 ms 2024-12-15T01:34:15,757 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testExportWithResetTtl,1,1734226455678.e6b4d731ac38c5f7608d8bba9e49c311. 2024-12-15T01:34:15,757 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1922): Closed testExportWithResetTtl,1,1734226455678.e6b4d731ac38c5f7608d8bba9e49c311. 2024-12-15T01:34:15,757 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1635): Region close journal for e6b4d731ac38c5f7608d8bba9e49c311: 2024-12-15T01:34:15,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741913_1089 (size=58) 2024-12-15T01:34:15,758 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_ADD_TO_META 2024-12-15T01:34:15,758 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testExportWithResetTtl,,1734226455678.41dbec9b53201b07e62c7c16b8e37cc5.","families":{"info":[{"qualifier":"regioninfo","vlen":57,"tag":[],"timestamp":"1734226455758"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734226455758"}]},"ts":"1734226455758"} 2024-12-15T01:34:15,759 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testExportWithResetTtl,1,1734226455678.e6b4d731ac38c5f7608d8bba9e49c311.","families":{"info":[{"qualifier":"regioninfo","vlen":57,"tag":[],"timestamp":"1734226455758"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734226455758"}]},"ts":"1734226455758"} 2024-12-15T01:34:15,763 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 
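[editor's note] The CreateTableProcedure above writes the FS layout for two regions split at key '1' and adds them to hbase:meta. A hedged sketch of the corresponding client call follows; the table name, family 'cf', and the single split key are read from the log, while the class name is mine and the default column-family settings are assumed to roughly match the descriptor printed above rather than being spelled out.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableExample {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                TableDescriptor desc = TableDescriptorBuilder
                        .newBuilder(TableName.valueOf("testExportWithResetTtl"))
                        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
                        .build();
                // One split key ("1") yields the two regions seen in the log:
                // [ '', '1' ) and [ '1', '' ).
                byte[][] splitKeys = { Bytes.toBytes("1") };
                admin.createTable(desc, splitKeys);
            }
        }
    }
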
2024-12-15T01:34:15,764 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-15T01:34:15,765 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734226455764"}]},"ts":"1734226455764"} 2024-12-15T01:34:15,767 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testExportWithResetTtl, state=ENABLING in hbase:meta 2024-12-15T01:34:15,786 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(202): Hosts are {66d575e28c61=0} racks are {/default-rack=0} 2024-12-15T01:34:15,788 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-15T01:34:15,788 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-15T01:34:15,788 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-15T01:34:15,788 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-15T01:34:15,788 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-15T01:34:15,788 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-15T01:34:15,788 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-15T01:34:15,788 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportWithResetTtl, region=41dbec9b53201b07e62c7c16b8e37cc5, ASSIGN}, {pid=43, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportWithResetTtl, region=e6b4d731ac38c5f7608d8bba9e49c311, ASSIGN}] 2024-12-15T01:34:15,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-15T01:34:15,791 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportWithResetTtl, region=41dbec9b53201b07e62c7c16b8e37cc5, ASSIGN 2024-12-15T01:34:15,792 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=43, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportWithResetTtl, region=e6b4d731ac38c5f7608d8bba9e49c311, ASSIGN 2024-12-15T01:34:15,793 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=43, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=e6b4d731ac38c5f7608d8bba9e49c311, ASSIGN; state=OFFLINE, location=66d575e28c61,42663,1734226422715; forceNewPlan=false, retain=false 2024-12-15T01:34:15,793 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=41dbec9b53201b07e62c7c16b8e37cc5, ASSIGN; state=OFFLINE, location=66d575e28c61,35281,1734226422799; forceNewPlan=false, retain=false 2024-12-15T01:34:15,944 INFO 
[66d575e28c61:40789 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-15T01:34:15,944 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=43 updating hbase:meta row=e6b4d731ac38c5f7608d8bba9e49c311, regionState=OPENING, regionLocation=66d575e28c61,42663,1734226422715 2024-12-15T01:34:15,944 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=42 updating hbase:meta row=41dbec9b53201b07e62c7c16b8e37cc5, regionState=OPENING, regionLocation=66d575e28c61,35281,1734226422799 2024-12-15T01:34:15,946 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=44, ppid=43, state=RUNNABLE; OpenRegionProcedure e6b4d731ac38c5f7608d8bba9e49c311, server=66d575e28c61,42663,1734226422715}] 2024-12-15T01:34:15,948 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=45, ppid=42, state=RUNNABLE; OpenRegionProcedure 41dbec9b53201b07e62c7c16b8e37cc5, server=66d575e28c61,35281,1734226422799}] 2024-12-15T01:34:15,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-15T01:34:16,100 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 66d575e28c61,42663,1734226422715 2024-12-15T01:34:16,102 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 66d575e28c61,35281,1734226422799 2024-12-15T01:34:16,104 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] handler.AssignRegionHandler(135): Open testExportWithResetTtl,1,1734226455678.e6b4d731ac38c5f7608d8bba9e49c311. 2024-12-15T01:34:16,105 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(7285): Opening region: {ENCODED => e6b4d731ac38c5f7608d8bba9e49c311, NAME => 'testExportWithResetTtl,1,1734226455678.e6b4d731ac38c5f7608d8bba9e49c311.', STARTKEY => '1', ENDKEY => ''} 2024-12-15T01:34:16,105 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(7999): Registered coprocessor service: region=testExportWithResetTtl,1,1734226455678.e6b4d731ac38c5f7608d8bba9e49c311. service=AccessControlService 2024-12-15T01:34:16,105 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-15T01:34:16,105 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportWithResetTtl e6b4d731ac38c5f7608d8bba9e49c311 2024-12-15T01:34:16,106 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(894): Instantiated testExportWithResetTtl,1,1734226455678.e6b4d731ac38c5f7608d8bba9e49c311.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T01:34:16,106 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(7327): checking encryption for e6b4d731ac38c5f7608d8bba9e49c311 2024-12-15T01:34:16,106 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] handler.AssignRegionHandler(135): Open testExportWithResetTtl,,1734226455678.41dbec9b53201b07e62c7c16b8e37cc5. 2024-12-15T01:34:16,106 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(7330): checking classloading for e6b4d731ac38c5f7608d8bba9e49c311 2024-12-15T01:34:16,106 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(7285): Opening region: {ENCODED => 41dbec9b53201b07e62c7c16b8e37cc5, NAME => 'testExportWithResetTtl,,1734226455678.41dbec9b53201b07e62c7c16b8e37cc5.', STARTKEY => '', ENDKEY => '1'} 2024-12-15T01:34:16,106 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(7999): Registered coprocessor service: region=testExportWithResetTtl,,1734226455678.41dbec9b53201b07e62c7c16b8e37cc5. service=AccessControlService 2024-12-15T01:34:16,106 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
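[editor's note] The AccessControlService coprocessor registered on region open above is what later serves the table ACL (the "jenkins: RWXCA" entries further down). In this run the creating user's permissions are written automatically, but an explicit grant through the standard AccessControlClient helper might look like the sketch below; the user and table names come from the log, everything else is an illustrative assumption.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.Permission;

    public class GrantExample {
        public static void main(String[] args) throws Throwable {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
                // Grant R/W/X/C/A on the whole table (null family and qualifier), matching "RWXCA".
                AccessControlClient.grant(conn, TableName.valueOf("testExportWithResetTtl"),
                        "jenkins", null, null,
                        Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
                        Permission.Action.CREATE, Permission.Action.ADMIN);
            }
        }
    }
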
2024-12-15T01:34:16,107 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportWithResetTtl 41dbec9b53201b07e62c7c16b8e37cc5 2024-12-15T01:34:16,107 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(894): Instantiated testExportWithResetTtl,,1734226455678.41dbec9b53201b07e62c7c16b8e37cc5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T01:34:16,107 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(7327): checking encryption for 41dbec9b53201b07e62c7c16b8e37cc5 2024-12-15T01:34:16,107 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(7330): checking classloading for 41dbec9b53201b07e62c7c16b8e37cc5 2024-12-15T01:34:16,107 INFO [StoreOpener-e6b4d731ac38c5f7608d8bba9e49c311-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region e6b4d731ac38c5f7608d8bba9e49c311 2024-12-15T01:34:16,108 INFO [StoreOpener-41dbec9b53201b07e62c7c16b8e37cc5-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 41dbec9b53201b07e62c7c16b8e37cc5 2024-12-15T01:34:16,109 INFO [StoreOpener-e6b4d731ac38c5f7608d8bba9e49c311-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e6b4d731ac38c5f7608d8bba9e49c311 columnFamilyName cf 2024-12-15T01:34:16,109 DEBUG [StoreOpener-e6b4d731ac38c5f7608d8bba9e49c311-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T01:34:16,109 INFO [StoreOpener-e6b4d731ac38c5f7608d8bba9e49c311-1 {}] regionserver.HStore(327): Store=e6b4d731ac38c5f7608d8bba9e49c311/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-15T01:34:16,110 INFO [StoreOpener-41dbec9b53201b07e62c7c16b8e37cc5-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, 
compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 41dbec9b53201b07e62c7c16b8e37cc5 columnFamilyName cf 2024-12-15T01:34:16,110 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testExportWithResetTtl/e6b4d731ac38c5f7608d8bba9e49c311 2024-12-15T01:34:16,110 DEBUG [StoreOpener-41dbec9b53201b07e62c7c16b8e37cc5-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T01:34:16,110 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testExportWithResetTtl/e6b4d731ac38c5f7608d8bba9e49c311 2024-12-15T01:34:16,111 INFO [StoreOpener-41dbec9b53201b07e62c7c16b8e37cc5-1 {}] regionserver.HStore(327): Store=41dbec9b53201b07e62c7c16b8e37cc5/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-15T01:34:16,112 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testExportWithResetTtl/41dbec9b53201b07e62c7c16b8e37cc5 2024-12-15T01:34:16,112 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testExportWithResetTtl/41dbec9b53201b07e62c7c16b8e37cc5 2024-12-15T01:34:16,113 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(1085): writing seq id for e6b4d731ac38c5f7608d8bba9e49c311 2024-12-15T01:34:16,114 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(1085): writing seq id for 41dbec9b53201b07e62c7c16b8e37cc5 2024-12-15T01:34:16,116 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testExportWithResetTtl/e6b4d731ac38c5f7608d8bba9e49c311/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-15T01:34:16,117 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-15T01:34:16,117 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(1102): Opened e6b4d731ac38c5f7608d8bba9e49c311; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68601366, jitterRate=0.022240012884140015}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-15T01:34:16,117 DEBUG 
[RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testExportWithResetTtl/41dbec9b53201b07e62c7c16b8e37cc5/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-15T01:34:16,118 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(1001): Region open journal for e6b4d731ac38c5f7608d8bba9e49c311: 2024-12-15T01:34:16,118 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(1102): Opened 41dbec9b53201b07e62c7c16b8e37cc5; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64016879, jitterRate=-0.046074166893959045}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-15T01:34:16,118 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(1001): Region open journal for 41dbec9b53201b07e62c7c16b8e37cc5: 2024-12-15T01:34:16,118 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35208, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-15T01:34:16,119 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegionServer(2601): Post open deploy tasks for testExportWithResetTtl,1,1734226455678.e6b4d731ac38c5f7608d8bba9e49c311., pid=44, masterSystemTime=1734226456100 2024-12-15T01:34:16,120 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegionServer(2601): Post open deploy tasks for testExportWithResetTtl,,1734226455678.41dbec9b53201b07e62c7c16b8e37cc5., pid=45, masterSystemTime=1734226456102 2024-12-15T01:34:16,121 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegionServer(2628): Finished post open deploy task for testExportWithResetTtl,1,1734226455678.e6b4d731ac38c5f7608d8bba9e49c311. 2024-12-15T01:34:16,121 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] handler.AssignRegionHandler(164): Opened testExportWithResetTtl,1,1734226455678.e6b4d731ac38c5f7608d8bba9e49c311. 2024-12-15T01:34:16,122 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=43 updating hbase:meta row=e6b4d731ac38c5f7608d8bba9e49c311, regionState=OPEN, openSeqNum=2, regionLocation=66d575e28c61,42663,1734226422715 2024-12-15T01:34:16,122 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-15T01:34:16,122 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegionServer(2628): Finished post open deploy task for testExportWithResetTtl,,1734226455678.41dbec9b53201b07e62c7c16b8e37cc5. 2024-12-15T01:34:16,122 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] handler.AssignRegionHandler(164): Opened testExportWithResetTtl,,1734226455678.41dbec9b53201b07e62c7c16b8e37cc5. 
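[editor's note] At this point both regions of testExportWithResetTtl are open and their locations are being written back to hbase:meta. The test harness waits for exactly this condition via HBaseTestingUtility (seen shortly below); outside the test utilities, a client could confirm assignment with a RegionLocator. A rough sketch under the same assumed connection setup:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class RegionCheckExample {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 RegionLocator locator = conn.getRegionLocator(TableName.valueOf("testExportWithResetTtl"))) {
                // Each HRegionLocation pairs a region with the server currently hosting it.
                for (HRegionLocation loc : locator.getAllRegionLocations()) {
                    System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
                }
            }
        }
    }
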
2024-12-15T01:34:16,123 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35220, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-15T01:34:16,124 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=42 updating hbase:meta row=41dbec9b53201b07e62c7c16b8e37cc5, regionState=OPEN, openSeqNum=2, regionLocation=66d575e28c61,35281,1734226422799 2024-12-15T01:34:16,126 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=44, resume processing ppid=43 2024-12-15T01:34:16,126 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=44, ppid=43, state=SUCCESS; OpenRegionProcedure e6b4d731ac38c5f7608d8bba9e49c311, server=66d575e28c61,42663,1734226422715 in 178 msec 2024-12-15T01:34:16,127 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=43, ppid=41, state=SUCCESS; TransitRegionStateProcedure table=testExportWithResetTtl, region=e6b4d731ac38c5f7608d8bba9e49c311, ASSIGN in 338 msec 2024-12-15T01:34:16,127 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=45, resume processing ppid=42 2024-12-15T01:34:16,128 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=45, ppid=42, state=SUCCESS; OpenRegionProcedure 41dbec9b53201b07e62c7c16b8e37cc5, server=66d575e28c61,35281,1734226422799 in 177 msec 2024-12-15T01:34:16,129 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=42, resume processing ppid=41 2024-12-15T01:34:16,129 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=42, ppid=41, state=SUCCESS; TransitRegionStateProcedure table=testExportWithResetTtl, region=41dbec9b53201b07e62c7c16b8e37cc5, ASSIGN in 339 msec 2024-12-15T01:34:16,129 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-15T01:34:16,129 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734226456129"}]},"ts":"1734226456129"} 2024-12-15T01:34:16,131 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testExportWithResetTtl, state=ENABLED in hbase:meta 2024-12-15T01:34:16,228 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_POST_OPERATION 2024-12-15T01:34:16,229 DEBUG [PEWorker-3 {}] access.PermissionStorage(175): Writing permission with rowKey testExportWithResetTtl jenkins: RWXCA 2024-12-15T01:34:16,232 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35281 {}] access.PermissionStorage(611): Read acl: entry[testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-15T01:34:16,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-15T01:34:16,319 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35281-0x1002752410c0002, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:34:16,319 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:39381-0x1002752410c0003, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:34:16,319 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42663-0x1002752410c0001, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:34:16,319 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:34:16,396 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-15T01:34:16,396 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-15T01:34:16,397 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-15T01:34:16,397 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-15T01:34:16,397 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-15T01:34:16,397 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-15T01:34:16,398 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-15T01:34:16,398 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-15T01:34:16,399 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=41, state=SUCCESS; CreateTableProcedure table=testExportWithResetTtl in 717 msec 2024-12-15T01:34:16,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-15T01:34:16,797 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testExportWithResetTtl, procId: 41 completed 2024-12-15T01:34:16,797 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): 
Waiting until all regions of table testExportWithResetTtl get assigned. Timeout = 60000ms 2024-12-15T01:34:16,797 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-15T01:34:16,804 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testExportWithResetTtl assigned to meta. Checking AM states. 2024-12-15T01:34:16,804 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-15T01:34:16,805 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testExportWithResetTtl assigned. 2024-12-15T01:34:16,818 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35281 {}] regionserver.HRegion(8254): writing data to region testExportWithResetTtl,,1734226455678.41dbec9b53201b07e62c7c16b8e37cc5. with WAL disabled. Data may be lost in the event of a crash. 2024-12-15T01:34:16,821 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42663 {}] regionserver.HRegion(8254): writing data to region testExportWithResetTtl,1,1734226455678.e6b4d731ac38c5f7608d8bba9e49c311. with WAL disabled. Data may be lost in the event of a crash. 2024-12-15T01:34:16,826 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testExportWithResetTtl 2024-12-15T01:34:16,826 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testExportWithResetTtl,,1734226455678.41dbec9b53201b07e62c7c16b8e37cc5. 2024-12-15T01:34:16,826 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-15T01:34:16,844 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } 2024-12-15T01:34:16,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734226456844 (current time:1734226456844). 
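For reference, a master-side snapshot request like the one logged above ({ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }) is what the public Admin API issues when a client snapshots an enabled table. The sketch below is a minimal, hypothetical client under assumed cluster settings; it is not the test code itself, and the TTL property handling is deliberately left out because the exact overload differs between HBase minor versions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TakeFlushSnapshot {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();   // reads hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testExportWithResetTtl");
      // Snapshotting an enabled table is performed as a FLUSH-type snapshot,
      // matching the "type=FLUSH" request recorded in the master log above.
      admin.snapshot("snaptb-testExportWithResetTtl", table);
      // The ttl=100000 seen in the log is passed as a snapshot property in
      // recent releases; that map-based overload is omitted here as an
      // assumption-free simplification.
    }
  }
}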
2024-12-15T01:34:16,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb-testExportWithResetTtl VERSION not specified, setting to 2 2024-12-15T01:34:16,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-15T01:34:16,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x04b5f145 to 127.0.0.1:64248 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4a8e4e8b 2024-12-15T01:34:17,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1da68dfa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T01:34:17,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T01:34:17,005 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43006, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T01:34:17,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x04b5f145 to 127.0.0.1:64248 2024-12-15T01:34:17,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T01:34:17,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2129991b to 127.0.0.1:64248 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@196b61a3 2024-12-15T01:34:17,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@308bf629, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T01:34:17,063 DEBUG [hconnection-0x51214516-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T01:34:17,064 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43018, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T01:34:17,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T01:34:17,067 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55672, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T01:34:17,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2129991b to 127.0.0.1:64248 2024-12-15T01:34:17,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T01:34:17,069 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] access.PermissionStorage(611): Read acl: entry[testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-15T01:34:17,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-15T01:34:17,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] procedure2.ProcedureExecutor(1098): Stored pid=46, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } 2024-12-15T01:34:17,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }, snapshot procedure id = 46 2024-12-15T01:34:17,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=46 2024-12-15T01:34:17,075 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_PREPARE 2024-12-15T01:34:17,076 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-15T01:34:17,079 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-15T01:34:17,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741914_1090 (size=143) 2024-12-15T01:34:17,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741914_1090 (size=143) 2024-12-15T01:34:17,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741914_1090 (size=143) 2024-12-15T01:34:17,092 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-15T01:34:17,092 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=47, ppid=46, state=RUNNABLE; SnapshotRegionProcedure 41dbec9b53201b07e62c7c16b8e37cc5}, {pid=48, ppid=46, state=RUNNABLE; SnapshotRegionProcedure e6b4d731ac38c5f7608d8bba9e49c311}] 2024-12-15T01:34:17,094 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=48, ppid=46, state=RUNNABLE; SnapshotRegionProcedure e6b4d731ac38c5f7608d8bba9e49c311 2024-12-15T01:34:17,094 INFO [PEWorker-2 {}] 
procedure.MasterProcedureScheduler(786): Took xlock for pid=47, ppid=46, state=RUNNABLE; SnapshotRegionProcedure 41dbec9b53201b07e62c7c16b8e37cc5 2024-12-15T01:34:17,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=46 2024-12-15T01:34:17,245 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 66d575e28c61,42663,1734226422715 2024-12-15T01:34:17,245 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 66d575e28c61,35281,1734226422799 2024-12-15T01:34:17,246 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42663 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=48 2024-12-15T01:34:17,246 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35281 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=47 2024-12-15T01:34:17,246 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportWithResetTtl,,1734226455678.41dbec9b53201b07e62c7c16b8e37cc5. 2024-12-15T01:34:17,246 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.HRegion(2837): Flushing 41dbec9b53201b07e62c7c16b8e37cc5 1/1 column families, dataSize=199 B heapSize=688 B 2024-12-15T01:34:17,246 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportWithResetTtl,1,1734226455678.e6b4d731ac38c5f7608d8bba9e49c311. 
2024-12-15T01:34:17,247 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.HRegion(2837): Flushing e6b4d731ac38c5f7608d8bba9e49c311 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-12-15T01:34:17,261 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testExportWithResetTtl/41dbec9b53201b07e62c7c16b8e37cc5/.tmp/cf/02e0628a7d04443abaa6b503c405935b is 71, key is 058217075ccaf380a63650ccde82d4c2/cf:q/1734226456818/Put/seqid=0 2024-12-15T01:34:17,268 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testExportWithResetTtl/e6b4d731ac38c5f7608d8bba9e49c311/.tmp/cf/d031b96cd9c143f8b547c5f55f1f4fad is 71, key is 1b21575cf50396d7d7698ee8cc06174b/cf:q/1734226456820/Put/seqid=0 2024-12-15T01:34:17,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741915_1091 (size=5286) 2024-12-15T01:34:17,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741915_1091 (size=5286) 2024-12-15T01:34:17,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741915_1091 (size=5286) 2024-12-15T01:34:17,276 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=199 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testExportWithResetTtl/41dbec9b53201b07e62c7c16b8e37cc5/.tmp/cf/02e0628a7d04443abaa6b503c405935b 2024-12-15T01:34:17,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741916_1092 (size=8324) 2024-12-15T01:34:17,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741916_1092 (size=8324) 2024-12-15T01:34:17,278 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.06 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testExportWithResetTtl/e6b4d731ac38c5f7608d8bba9e49c311/.tmp/cf/d031b96cd9c143f8b547c5f55f1f4fad 2024-12-15T01:34:17,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741916_1092 (size=8324) 2024-12-15T01:34:17,283 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testExportWithResetTtl/41dbec9b53201b07e62c7c16b8e37cc5/.tmp/cf/02e0628a7d04443abaa6b503c405935b as 
hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testExportWithResetTtl/41dbec9b53201b07e62c7c16b8e37cc5/cf/02e0628a7d04443abaa6b503c405935b 2024-12-15T01:34:17,291 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testExportWithResetTtl/41dbec9b53201b07e62c7c16b8e37cc5/cf/02e0628a7d04443abaa6b503c405935b, entries=3, sequenceid=5, filesize=5.2 K 2024-12-15T01:34:17,292 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.HRegion(3040): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for 41dbec9b53201b07e62c7c16b8e37cc5 in 46ms, sequenceid=5, compaction requested=false 2024-12-15T01:34:17,292 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportWithResetTtl' 2024-12-15T01:34:17,293 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.HRegion(2538): Flush status journal for 41dbec9b53201b07e62c7c16b8e37cc5: 2024-12-15T01:34:17,293 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportWithResetTtl,,1734226455678.41dbec9b53201b07e62c7c16b8e37cc5. for snaptb-testExportWithResetTtl completed. 2024-12-15T01:34:17,293 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] snapshot.SnapshotManifest(241): Storing 'testExportWithResetTtl,,1734226455678.41dbec9b53201b07e62c7c16b8e37cc5.' 
region-info for snapshot=snaptb-testExportWithResetTtl 2024-12-15T01:34:17,293 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-15T01:34:17,293 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testExportWithResetTtl/41dbec9b53201b07e62c7c16b8e37cc5/cf/02e0628a7d04443abaa6b503c405935b] hfiles 2024-12-15T01:34:17,293 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testExportWithResetTtl/41dbec9b53201b07e62c7c16b8e37cc5/cf/02e0628a7d04443abaa6b503c405935b for snapshot=snaptb-testExportWithResetTtl 2024-12-15T01:34:17,305 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testExportWithResetTtl/e6b4d731ac38c5f7608d8bba9e49c311/.tmp/cf/d031b96cd9c143f8b547c5f55f1f4fad as hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testExportWithResetTtl/e6b4d731ac38c5f7608d8bba9e49c311/cf/d031b96cd9c143f8b547c5f55f1f4fad 2024-12-15T01:34:17,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741917_1093 (size=100) 2024-12-15T01:34:17,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741917_1093 (size=100) 2024-12-15T01:34:17,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741917_1093 (size=100) 2024-12-15T01:34:17,311 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportWithResetTtl,,1734226455678.41dbec9b53201b07e62c7c16b8e37cc5. 
2024-12-15T01:34:17,312 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=47 2024-12-15T01:34:17,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.HMaster(4106): Remote procedure done, pid=47 2024-12-15T01:34:17,312 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb-testExportWithResetTtl on region 41dbec9b53201b07e62c7c16b8e37cc5 2024-12-15T01:34:17,312 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=47, ppid=46, state=RUNNABLE; SnapshotRegionProcedure 41dbec9b53201b07e62c7c16b8e37cc5 2024-12-15T01:34:17,313 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testExportWithResetTtl/e6b4d731ac38c5f7608d8bba9e49c311/cf/d031b96cd9c143f8b547c5f55f1f4fad, entries=47, sequenceid=5, filesize=8.1 K 2024-12-15T01:34:17,314 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.HRegion(3040): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for e6b4d731ac38c5f7608d8bba9e49c311 in 67ms, sequenceid=5, compaction requested=false 2024-12-15T01:34:17,314 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.HRegion(2538): Flush status journal for e6b4d731ac38c5f7608d8bba9e49c311: 2024-12-15T01:34:17,314 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportWithResetTtl,1,1734226455678.e6b4d731ac38c5f7608d8bba9e49c311. for snaptb-testExportWithResetTtl completed. 2024-12-15T01:34:17,314 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] snapshot.SnapshotManifest(241): Storing 'testExportWithResetTtl,1,1734226455678.e6b4d731ac38c5f7608d8bba9e49c311.' 
region-info for snapshot=snaptb-testExportWithResetTtl 2024-12-15T01:34:17,314 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-15T01:34:17,315 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testExportWithResetTtl/e6b4d731ac38c5f7608d8bba9e49c311/cf/d031b96cd9c143f8b547c5f55f1f4fad] hfiles 2024-12-15T01:34:17,315 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testExportWithResetTtl/e6b4d731ac38c5f7608d8bba9e49c311/cf/d031b96cd9c143f8b547c5f55f1f4fad for snapshot=snaptb-testExportWithResetTtl 2024-12-15T01:34:17,315 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=47, ppid=46, state=SUCCESS; SnapshotRegionProcedure 41dbec9b53201b07e62c7c16b8e37cc5 in 221 msec 2024-12-15T01:34:17,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741918_1094 (size=100) 2024-12-15T01:34:17,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741918_1094 (size=100) 2024-12-15T01:34:17,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741918_1094 (size=100) 2024-12-15T01:34:17,326 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportWithResetTtl,1,1734226455678.e6b4d731ac38c5f7608d8bba9e49c311. 
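The two SnapshotRegionProcedure workers above first flush each region's memstore to an HFile and then record references to the resulting files in the snapshot manifest. The closest client-visible equivalent of that per-region flush is an explicit table flush; the following is only an illustrative sketch against an assumed running cluster, not part of the test.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTable {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Force the memstore of every region of the table to be written out as
      // HFiles, the same work SnapshotRegionCallable performs per region before
      // adding the files to the snapshot manifest.
      admin.flush(TableName.valueOf("testExportWithResetTtl"));
    }
  }
}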
2024-12-15T01:34:17,326 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=48 2024-12-15T01:34:17,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.HMaster(4106): Remote procedure done, pid=48 2024-12-15T01:34:17,327 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb-testExportWithResetTtl on region e6b4d731ac38c5f7608d8bba9e49c311 2024-12-15T01:34:17,327 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=48, ppid=46, state=RUNNABLE; SnapshotRegionProcedure e6b4d731ac38c5f7608d8bba9e49c311 2024-12-15T01:34:17,330 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=48, resume processing ppid=46 2024-12-15T01:34:17,330 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-15T01:34:17,330 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=48, ppid=46, state=SUCCESS; SnapshotRegionProcedure e6b4d731ac38c5f7608d8bba9e49c311 in 236 msec 2024-12-15T01:34:17,331 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-15T01:34:17,332 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-15T01:34:17,332 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb-testExportWithResetTtl 2024-12-15T01:34:17,333 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-12-15T01:34:17,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741919_1095 (size=600) 2024-12-15T01:34:17,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741919_1095 (size=600) 2024-12-15T01:34:17,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741919_1095 (size=600) 2024-12-15T01:34:17,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=46 2024-12-15T01:34:17,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=46 2024-12-15T01:34:17,755 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, 
locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-15T01:34:17,766 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-15T01:34:17,766 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl to hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-12-15T01:34:17,768 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_POST_OPERATION 2024-12-15T01:34:17,768 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }, snapshot procedure id = 46 2024-12-15T01:34:17,770 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=46, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } in 698 msec 2024-12-15T01:34:17,852 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/MiniMRCluster_1118476571/yarn-659708074/MiniMRCluster_1118476571-localDir-nm-0_1/usercache/jenkins/appcache/application_1734226430009_0001/container_1734226430009_0001_01_000001/launch_container.sh] 2024-12-15T01:34:17,852 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/MiniMRCluster_1118476571/yarn-659708074/MiniMRCluster_1118476571-localDir-nm-0_1/usercache/jenkins/appcache/application_1734226430009_0001/container_1734226430009_0001_01_000001/container_tokens] 2024-12-15T01:34:17,852 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/MiniMRCluster_1118476571/yarn-659708074/MiniMRCluster_1118476571-localDir-nm-0_1/usercache/jenkins/appcache/application_1734226430009_0001/container_1734226430009_0001_01_000001/sysfs] 2024-12-15T01:34:17,853 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734226430009_0001_000001 (auth:SIMPLE) from 127.0.0.1:43056 2024-12-15T01:34:18,177 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=46 2024-12-15T01:34:18,177 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testExportWithResetTtl, procId: 46 completed 2024-12-15T01:34:18,188 INFO [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226458188 2024-12-15T01:34:18,188 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:33329, tgtDir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226458188, rawTgtDir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226458188, srcFsUri=hdfs://localhost:33329, srcDir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b 2024-12-15T01:34:18,225 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:33329, inputRoot=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b 2024-12-15T01:34:18,225 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-778768832_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226458188, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226458188/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-12-15T01:34:18,228 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 
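The export phase that follows is driven by the ExportSnapshot tool, using the source root and target directory printed in the inputFs/outputRoot lines above. A hedged sketch of invoking the same tool from Java is shown below; the target path and mapper count are placeholders, and depending on the HBase version the tool may instead be launched through its own main method or the `hbase` command line.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class RunExportSnapshot {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // ExportSnapshot is a Hadoop Tool; the same arguments are accepted on the
    // command line via `hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot ...`.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb-testExportWithResetTtl",
        "-copy-to", "hdfs://localhost:33329/user/jenkins/export-test",  // placeholder target
        "-mappers", "2"                                                 // assumed parallelism
    });
    System.exit(rc);
  }
}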
2024-12-15T01:34:18,239 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/snaptb-testExportWithResetTtl to hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226458188/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-12-15T01:34:18,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741920_1096 (size=600) 2024-12-15T01:34:18,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741920_1096 (size=600) 2024-12-15T01:34:18,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741920_1096 (size=600) 2024-12-15T01:34:18,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741921_1097 (size=143) 2024-12-15T01:34:18,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741921_1097 (size=143) 2024-12-15T01:34:18,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741921_1097 (size=143) 2024-12-15T01:34:18,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741922_1098 (size=141) 2024-12-15T01:34:18,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741922_1098 (size=141) 2024-12-15T01:34:18,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741922_1098 (size=141) 2024-12-15T01:34:18,420 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/hadoop-14125065283999589192.jar 2024-12-15T01:34:18,420 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-15T01:34:18,421 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-15T01:34:18,421 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-15T01:34:19,122 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-15T01:34:19,123 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35224, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-15T01:34:19,333 
DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/hadoop-11832721896111327726.jar 2024-12-15T01:34:19,333 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-15T01:34:19,334 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-15T01:34:19,408 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/hadoop-13855044963991327191.jar 2024-12-15T01:34:19,408 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-15T01:34:19,408 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-15T01:34:19,409 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-15T01:34:19,409 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-15T01:34:19,409 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-15T01:34:19,410 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-15T01:34:19,410 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-15T01:34:19,410 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-15T01:34:19,410 DEBUG [Time-limited 
test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-15T01:34:19,411 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-15T01:34:19,411 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-15T01:34:19,411 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-15T01:34:19,411 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-15T01:34:19,412 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-15T01:34:19,412 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-15T01:34:19,412 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-15T01:34:19,412 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-15T01:34:19,412 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-15T01:34:19,413 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-15T01:34:19,413 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-15T01:34:19,413 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-15T01:34:19,413 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-15T01:34:19,414 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-15T01:34:19,414 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-15T01:34:19,414 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-15T01:34:19,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741923_1099 (size=451756) 2024-12-15T01:34:19,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741923_1099 (size=451756) 2024-12-15T01:34:19,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741923_1099 (size=451756) 2024-12-15T01:34:19,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741924_1100 (size=127628) 2024-12-15T01:34:19,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741924_1100 (size=127628) 2024-12-15T01:34:19,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741924_1100 (size=127628) 2024-12-15T01:34:19,532 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-15T01:34:19,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741925_1101 (size=2172137) 2024-12-15T01:34:19,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741925_1101 (size=2172137) 2024-12-15T01:34:19,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741925_1101 (size=2172137) 2024-12-15T01:34:19,602 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741926_1102 (size=213228) 2024-12-15T01:34:19,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741926_1102 (size=213228) 2024-12-15T01:34:19,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741926_1102 (size=213228) 2024-12-15T01:34:19,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741927_1103 (size=1877034) 2024-12-15T01:34:19,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741927_1103 (size=1877034) 2024-12-15T01:34:19,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741927_1103 (size=1877034) 2024-12-15T01:34:19,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741928_1104 (size=533455) 2024-12-15T01:34:19,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741928_1104 (size=533455) 2024-12-15T01:34:19,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741928_1104 (size=533455) 2024-12-15T01:34:19,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741929_1105 (size=7280644) 2024-12-15T01:34:19,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741929_1105 (size=7280644) 2024-12-15T01:34:19,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741929_1105 (size=7280644) 2024-12-15T01:34:19,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741930_1106 (size=4188619) 2024-12-15T01:34:19,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741930_1106 (size=4188619) 2024-12-15T01:34:19,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741930_1106 (size=4188619) 2024-12-15T01:34:19,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741931_1107 (size=20406) 2024-12-15T01:34:19,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741931_1107 (size=20406) 2024-12-15T01:34:19,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741931_1107 (size=20406) 2024-12-15T01:34:19,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741932_1108 (size=75495) 2024-12-15T01:34:19,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741932_1108 (size=75495) 2024-12-15T01:34:19,809 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741932_1108 (size=75495) 2024-12-15T01:34:19,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741933_1109 (size=45609) 2024-12-15T01:34:19,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741933_1109 (size=45609) 2024-12-15T01:34:19,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741933_1109 (size=45609) 2024-12-15T01:34:19,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741934_1110 (size=110084) 2024-12-15T01:34:19,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741934_1110 (size=110084) 2024-12-15T01:34:19,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741934_1110 (size=110084) 2024-12-15T01:34:19,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741935_1111 (size=1323991) 2024-12-15T01:34:19,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741935_1111 (size=1323991) 2024-12-15T01:34:19,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741935_1111 (size=1323991) 2024-12-15T01:34:19,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741936_1112 (size=23076) 2024-12-15T01:34:19,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741936_1112 (size=23076) 2024-12-15T01:34:19,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741936_1112 (size=23076) 2024-12-15T01:34:19,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741937_1113 (size=126803) 2024-12-15T01:34:19,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741937_1113 (size=126803) 2024-12-15T01:34:19,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741937_1113 (size=126803) 2024-12-15T01:34:19,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741938_1114 (size=322274) 2024-12-15T01:34:19,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741938_1114 (size=322274) 2024-12-15T01:34:19,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741938_1114 (size=322274) 2024-12-15T01:34:19,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741939_1115 (size=1832290) 2024-12-15T01:34:19,951 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741939_1115 (size=1832290) 2024-12-15T01:34:19,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741939_1115 (size=1832290) 2024-12-15T01:34:19,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741940_1116 (size=30081) 2024-12-15T01:34:19,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741940_1116 (size=30081) 2024-12-15T01:34:19,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741940_1116 (size=30081) 2024-12-15T01:34:19,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741941_1117 (size=53616) 2024-12-15T01:34:19,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741941_1117 (size=53616) 2024-12-15T01:34:19,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741941_1117 (size=53616) 2024-12-15T01:34:20,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741942_1118 (size=6350909) 2024-12-15T01:34:20,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741942_1118 (size=6350909) 2024-12-15T01:34:20,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741942_1118 (size=6350909) 2024-12-15T01:34:20,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741943_1119 (size=29229) 2024-12-15T01:34:20,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741943_1119 (size=29229) 2024-12-15T01:34:20,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741943_1119 (size=29229) 2024-12-15T01:34:20,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741944_1120 (size=169089) 2024-12-15T01:34:20,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741944_1120 (size=169089) 2024-12-15T01:34:20,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741944_1120 (size=169089) 2024-12-15T01:34:20,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741945_1121 (size=912094) 2024-12-15T01:34:20,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741945_1121 (size=912094) 2024-12-15T01:34:20,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741945_1121 (size=912094) 
2024-12-15T01:34:20,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741946_1122 (size=5175431) 2024-12-15T01:34:20,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741946_1122 (size=5175431) 2024-12-15T01:34:20,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741946_1122 (size=5175431) 2024-12-15T01:34:20,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741947_1123 (size=136454) 2024-12-15T01:34:20,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741947_1123 (size=136454) 2024-12-15T01:34:20,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741947_1123 (size=136454) 2024-12-15T01:34:20,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741948_1124 (size=3317408) 2024-12-15T01:34:20,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741948_1124 (size=3317408) 2024-12-15T01:34:20,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741948_1124 (size=3317408) 2024-12-15T01:34:20,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741949_1125 (size=503880) 2024-12-15T01:34:20,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741949_1125 (size=503880) 2024-12-15T01:34:20,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741949_1125 (size=503880) 2024-12-15T01:34:20,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741950_1126 (size=4695811) 2024-12-15T01:34:20,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741950_1126 (size=4695811) 2024-12-15T01:34:20,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741950_1126 (size=4695811) 2024-12-15T01:34:20,294 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
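The long run of TableMapReduceUtil(923) DEBUG lines records each dependency class being resolved to its jar so the jar can be shipped with the MapReduce job, and the JobResourceUploader warning appears because no job jar was set. A minimal sketch of the usual client-side setup is below; the driver class and job name are placeholders introduced for illustration only.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class ConfigureJobJars {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "export-snapshot-demo");  // placeholder job name
    // Setting the job jar from a driver class avoids the
    // "No job jar file set" warning seen above.
    job.setJarByClass(ConfigureJobJars.class);
    // Resolve HBase (and transitive) classes to their jars and add them to the
    // job's distributed cache, which is what produces the
    // "For class ..., using jar ..." lines in this log.
    TableMapReduceUtil.addDependencyJars(job);
    // The job would then be configured with mapper/reducer classes and submitted as usual.
  }
}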
2024-12-15T01:34:20,299 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb-testExportWithResetTtl' hfile list 2024-12-15T01:34:20,303 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=13.3 K 2024-12-15T01:34:20,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741951_1127 (size=324) 2024-12-15T01:34:20,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741951_1127 (size=324) 2024-12-15T01:34:20,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741951_1127 (size=324) 2024-12-15T01:34:20,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741952_1128 (size=15) 2024-12-15T01:34:20,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741952_1128 (size=15) 2024-12-15T01:34:20,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741952_1128 (size=15) 2024-12-15T01:34:20,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741953_1129 (size=304931) 2024-12-15T01:34:20,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741953_1129 (size=304931) 2024-12-15T01:34:20,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741953_1129 (size=304931) 2024-12-15T01:34:20,385 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-15T01:34:20,385 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-15T01:34:20,833 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734226430009_0002_000001 (auth:SIMPLE) from 127.0.0.1:55902 2024-12-15T01:34:22,293 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl 2024-12-15T01:34:22,293 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl Metrics about Tables on a single HBase RegionServer 2024-12-15T01:34:22,294 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl 2024-12-15T01:34:22,294 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl Metrics about Tables on a single HBase RegionServer 2024-12-15T01:34:22,295 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName 2024-12-15T01:34:27,078 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734226430009_0002_000001 (auth:SIMPLE) from 127.0.0.1:35412 2024-12-15T01:34:27,455 INFO [master/66d575e28c61:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-15T01:34:27,455 INFO [master/66d575e28c61:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-12-15T01:34:27,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741954_1130 (size=350605) 2024-12-15T01:34:27,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741954_1130 (size=350605) 2024-12-15T01:34:27,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741954_1130 (size=350605) 2024-12-15T01:34:27,797 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-15T01:34:29,334 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734226430009_0002_000001 (auth:SIMPLE) from 127.0.0.1:55912 2024-12-15T01:34:32,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741955_1131 (size=8324) 2024-12-15T01:34:32,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741955_1131 (size=8324) 2024-12-15T01:34:32,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741955_1131 (size=8324) 2024-12-15T01:34:32,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741956_1132 (size=5286) 2024-12-15T01:34:32,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741956_1132 (size=5286) 2024-12-15T01:34:32,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741956_1132 (size=5286) 2024-12-15T01:34:32,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741957_1133 (size=17398) 2024-12-15T01:34:32,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741957_1133 (size=17398) 2024-12-15T01:34:32,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741957_1133 (size=17398) 2024-12-15T01:34:32,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741958_1134 (size=461) 2024-12-15T01:34:32,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741958_1134 (size=461) 2024-12-15T01:34:32,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741958_1134 (size=461) 2024-12-15T01:34:32,586 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/MiniMRCluster_1118476571/yarn-659708074/MiniMRCluster_1118476571-localDir-nm-0_0/usercache/jenkins/appcache/application_1734226430009_0002/container_1734226430009_0002_01_000002/launch_container.sh] 2024-12-15T01:34:32,586 WARN 
[ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/MiniMRCluster_1118476571/yarn-659708074/MiniMRCluster_1118476571-localDir-nm-0_0/usercache/jenkins/appcache/application_1734226430009_0002/container_1734226430009_0002_01_000002/container_tokens] 2024-12-15T01:34:32,586 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/MiniMRCluster_1118476571/yarn-659708074/MiniMRCluster_1118476571-localDir-nm-0_0/usercache/jenkins/appcache/application_1734226430009_0002/container_1734226430009_0002_01_000002/sysfs] 2024-12-15T01:34:32,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741959_1135 (size=17398) 2024-12-15T01:34:32,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741959_1135 (size=17398) 2024-12-15T01:34:32,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741959_1135 (size=17398) 2024-12-15T01:34:32,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741960_1136 (size=350605) 2024-12-15T01:34:32,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741960_1136 (size=350605) 2024-12-15T01:34:32,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741960_1136 (size=350605) 2024-12-15T01:34:32,678 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734226430009_0002_000001 (auth:SIMPLE) from 127.0.0.1:60690 2024-12-15T01:34:34,596 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-15T01:34:34,598 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 
2024-12-15T01:34:34,605 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: snaptb-testExportWithResetTtl 2024-12-15T01:34:34,606 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-15T01:34:34,606 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-15T01:34:34,606 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-778768832_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/snaptb-testExportWithResetTtl at hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-12-15T01:34:34,607 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/snaptb-testExportWithResetTtl/.snapshotinfo 2024-12-15T01:34:34,607 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/snaptb-testExportWithResetTtl/data.manifest 2024-12-15T01:34:34,607 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-778768832_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226458188/.hbase-snapshot/snaptb-testExportWithResetTtl at hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226458188/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-12-15T01:34:34,607 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226458188/.hbase-snapshot/snaptb-testExportWithResetTtl/.snapshotinfo 2024-12-15T01:34:34,607 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226458188/.hbase-snapshot/snaptb-testExportWithResetTtl/data.manifest 2024-12-15T01:34:34,616 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testExportWithResetTtl 2024-12-15T01:34:34,616 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testExportWithResetTtl 2024-12-15T01:34:34,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] procedure2.ProcedureExecutor(1098): Stored pid=49, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testExportWithResetTtl 2024-12-15T01:34:34,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-15T01:34:34,620 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734226474620"}]},"ts":"1734226474620"} 2024-12-15T01:34:34,622 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testExportWithResetTtl, state=DISABLING in hbase:meta 2024-12-15T01:34:34,665 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testExportWithResetTtl to state=DISABLING 
2024-12-15T01:34:34,666 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=50, ppid=49, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testExportWithResetTtl}] 2024-12-15T01:34:34,668 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=51, ppid=50, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testExportWithResetTtl, region=41dbec9b53201b07e62c7c16b8e37cc5, UNASSIGN}, {pid=52, ppid=50, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testExportWithResetTtl, region=e6b4d731ac38c5f7608d8bba9e49c311, UNASSIGN}] 2024-12-15T01:34:34,669 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=52, ppid=50, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testExportWithResetTtl, region=e6b4d731ac38c5f7608d8bba9e49c311, UNASSIGN 2024-12-15T01:34:34,670 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=51, ppid=50, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testExportWithResetTtl, region=41dbec9b53201b07e62c7c16b8e37cc5, UNASSIGN 2024-12-15T01:34:34,671 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=52 updating hbase:meta row=e6b4d731ac38c5f7608d8bba9e49c311, regionState=CLOSING, regionLocation=66d575e28c61,42663,1734226422715 2024-12-15T01:34:34,671 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=51 updating hbase:meta row=41dbec9b53201b07e62c7c16b8e37cc5, regionState=CLOSING, regionLocation=66d575e28c61,35281,1734226422799 2024-12-15T01:34:34,673 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-15T01:34:34,673 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=53, ppid=52, state=RUNNABLE; CloseRegionProcedure e6b4d731ac38c5f7608d8bba9e49c311, server=66d575e28c61,42663,1734226422715}] 2024-12-15T01:34:34,674 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-15T01:34:34,674 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=54, ppid=51, state=RUNNABLE; CloseRegionProcedure 41dbec9b53201b07e62c7c16b8e37cc5, server=66d575e28c61,35281,1734226422799}] 2024-12-15T01:34:34,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-15T01:34:34,825 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 66d575e28c61,42663,1734226422715 2024-12-15T01:34:34,826 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] handler.UnassignRegionHandler(124): Close e6b4d731ac38c5f7608d8bba9e49c311 2024-12-15T01:34:34,826 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 66d575e28c61,35281,1734226422799 2024-12-15T01:34:34,826 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-15T01:34:34,826 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] regionserver.HRegion(1681): Closing e6b4d731ac38c5f7608d8bba9e49c311, disabling compactions & flushes 
2024-12-15T01:34:34,827 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] regionserver.HRegion(1703): Closing region testExportWithResetTtl,1,1734226455678.e6b4d731ac38c5f7608d8bba9e49c311. 2024-12-15T01:34:34,827 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportWithResetTtl,1,1734226455678.e6b4d731ac38c5f7608d8bba9e49c311. 2024-12-15T01:34:34,827 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] regionserver.HRegion(1791): Acquired close lock on testExportWithResetTtl,1,1734226455678.e6b4d731ac38c5f7608d8bba9e49c311. after waiting 0 ms 2024-12-15T01:34:34,827 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] regionserver.HRegion(1801): Updates disabled for region testExportWithResetTtl,1,1734226455678.e6b4d731ac38c5f7608d8bba9e49c311. 2024-12-15T01:34:34,827 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] handler.UnassignRegionHandler(124): Close 41dbec9b53201b07e62c7c16b8e37cc5 2024-12-15T01:34:34,827 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-15T01:34:34,827 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1681): Closing 41dbec9b53201b07e62c7c16b8e37cc5, disabling compactions & flushes 2024-12-15T01:34:34,827 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1703): Closing region testExportWithResetTtl,,1734226455678.41dbec9b53201b07e62c7c16b8e37cc5. 2024-12-15T01:34:34,827 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportWithResetTtl,,1734226455678.41dbec9b53201b07e62c7c16b8e37cc5. 2024-12-15T01:34:34,827 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1791): Acquired close lock on testExportWithResetTtl,,1734226455678.41dbec9b53201b07e62c7c16b8e37cc5. after waiting 0 ms 2024-12-15T01:34:34,827 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1801): Updates disabled for region testExportWithResetTtl,,1734226455678.41dbec9b53201b07e62c7c16b8e37cc5. 
2024-12-15T01:34:34,832 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testExportWithResetTtl/e6b4d731ac38c5f7608d8bba9e49c311/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-15T01:34:34,832 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testExportWithResetTtl/41dbec9b53201b07e62c7c16b8e37cc5/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-15T01:34:34,833 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-15T01:34:34,833 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-15T01:34:34,833 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1922): Closed testExportWithResetTtl,,1734226455678.41dbec9b53201b07e62c7c16b8e37cc5. 2024-12-15T01:34:34,833 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] regionserver.HRegion(1922): Closed testExportWithResetTtl,1,1734226455678.e6b4d731ac38c5f7608d8bba9e49c311. 2024-12-15T01:34:34,833 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1635): Region close journal for 41dbec9b53201b07e62c7c16b8e37cc5: 2024-12-15T01:34:34,833 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] regionserver.HRegion(1635): Region close journal for e6b4d731ac38c5f7608d8bba9e49c311: 2024-12-15T01:34:34,835 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] handler.UnassignRegionHandler(170): Closed e6b4d731ac38c5f7608d8bba9e49c311 2024-12-15T01:34:34,836 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=52 updating hbase:meta row=e6b4d731ac38c5f7608d8bba9e49c311, regionState=CLOSED 2024-12-15T01:34:34,839 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=51 updating hbase:meta row=41dbec9b53201b07e62c7c16b8e37cc5, regionState=CLOSED 2024-12-15T01:34:34,840 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] handler.UnassignRegionHandler(170): Closed 41dbec9b53201b07e62c7c16b8e37cc5 2024-12-15T01:34:34,842 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=53, resume processing ppid=52 2024-12-15T01:34:34,843 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=53, ppid=52, state=SUCCESS; CloseRegionProcedure e6b4d731ac38c5f7608d8bba9e49c311, server=66d575e28c61,42663,1734226422715 in 165 msec 2024-12-15T01:34:34,844 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=52, ppid=50, state=SUCCESS; TransitRegionStateProcedure table=testExportWithResetTtl, region=e6b4d731ac38c5f7608d8bba9e49c311, UNASSIGN in 175 msec 2024-12-15T01:34:34,845 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=54, resume processing ppid=51 2024-12-15T01:34:34,846 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): 
Finished pid=54, ppid=51, state=SUCCESS; CloseRegionProcedure 41dbec9b53201b07e62c7c16b8e37cc5, server=66d575e28c61,35281,1734226422799 in 170 msec 2024-12-15T01:34:34,848 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=51, resume processing ppid=50 2024-12-15T01:34:34,848 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=51, ppid=50, state=SUCCESS; TransitRegionStateProcedure table=testExportWithResetTtl, region=41dbec9b53201b07e62c7c16b8e37cc5, UNASSIGN in 177 msec 2024-12-15T01:34:34,851 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=50, resume processing ppid=49 2024-12-15T01:34:34,851 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=50, ppid=49, state=SUCCESS; CloseTableRegionsProcedure table=testExportWithResetTtl in 183 msec 2024-12-15T01:34:34,853 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734226474852"}]},"ts":"1734226474852"} 2024-12-15T01:34:34,855 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testExportWithResetTtl, state=DISABLED in hbase:meta 2024-12-15T01:34:34,869 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testExportWithResetTtl to state=DISABLED 2024-12-15T01:34:34,871 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=49, state=SUCCESS; DisableTableProcedure table=testExportWithResetTtl in 253 msec 2024-12-15T01:34:34,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-15T01:34:34,923 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testExportWithResetTtl, procId: 49 completed 2024-12-15T01:34:34,924 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testExportWithResetTtl 2024-12-15T01:34:34,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] procedure2.ProcedureExecutor(1098): Stored pid=55, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testExportWithResetTtl 2024-12-15T01:34:34,926 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=55, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-15T01:34:34,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] access.PermissionStorage(259): Removing permissions of removed table testExportWithResetTtl 2024-12-15T01:34:34,927 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=55, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-15T01:34:34,928 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35281 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testExportWithResetTtl 2024-12-15T01:34:34,932 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testExportWithResetTtl/e6b4d731ac38c5f7608d8bba9e49c311 2024-12-15T01:34:34,932 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(133): ARCHIVING 
hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testExportWithResetTtl/41dbec9b53201b07e62c7c16b8e37cc5 2024-12-15T01:34:34,934 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testExportWithResetTtl/41dbec9b53201b07e62c7c16b8e37cc5/cf, FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testExportWithResetTtl/41dbec9b53201b07e62c7c16b8e37cc5/recovered.edits] 2024-12-15T01:34:34,934 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testExportWithResetTtl/e6b4d731ac38c5f7608d8bba9e49c311/cf, FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testExportWithResetTtl/e6b4d731ac38c5f7608d8bba9e49c311/recovered.edits] 2024-12-15T01:34:34,945 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-15T01:34:34,945 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42663-0x1002752410c0001, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-15T01:34:34,945 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35281-0x1002752410c0002, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-15T01:34:34,945 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39381-0x1002752410c0003, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-15T01:34:34,946 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-12-15T01:34:34,946 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-12-15T01:34:34,947 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testExportWithResetTtl/41dbec9b53201b07e62c7c16b8e37cc5/cf/02e0628a7d04443abaa6b503c405935b to hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/archive/data/default/testExportWithResetTtl/41dbec9b53201b07e62c7c16b8e37cc5/cf/02e0628a7d04443abaa6b503c405935b 2024-12-15T01:34:34,947 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testExportWithResetTtl/e6b4d731ac38c5f7608d8bba9e49c311/cf/d031b96cd9c143f8b547c5f55f1f4fad to hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/archive/data/default/testExportWithResetTtl/e6b4d731ac38c5f7608d8bba9e49c311/cf/d031b96cd9c143f8b547c5f55f1f4fad 2024-12-15T01:34:34,955 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from 
FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testExportWithResetTtl/41dbec9b53201b07e62c7c16b8e37cc5/recovered.edits/8.seqid to hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/archive/data/default/testExportWithResetTtl/41dbec9b53201b07e62c7c16b8e37cc5/recovered.edits/8.seqid 2024-12-15T01:34:34,956 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testExportWithResetTtl/41dbec9b53201b07e62c7c16b8e37cc5 2024-12-15T01:34:34,956 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testExportWithResetTtl/e6b4d731ac38c5f7608d8bba9e49c311/recovered.edits/8.seqid to hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/archive/data/default/testExportWithResetTtl/e6b4d731ac38c5f7608d8bba9e49c311/recovered.edits/8.seqid 2024-12-15T01:34:34,957 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testExportWithResetTtl/e6b4d731ac38c5f7608d8bba9e49c311 2024-12-15T01:34:34,957 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived testExportWithResetTtl regions 2024-12-15T01:34:34,960 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=55, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-15T01:34:34,964 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testExportWithResetTtl from hbase:meta 2024-12-15T01:34:34,972 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(408): Removing 'testExportWithResetTtl' descriptor. 2024-12-15T01:34:34,974 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=55, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-15T01:34:34,974 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(398): Removing 'testExportWithResetTtl' from region states. 
2024-12-15T01:34:34,975 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testExportWithResetTtl,,1734226455678.41dbec9b53201b07e62c7c16b8e37cc5.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734226474974"}]},"ts":"9223372036854775807"} 2024-12-15T01:34:34,975 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testExportWithResetTtl,1,1734226455678.e6b4d731ac38c5f7608d8bba9e49c311.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734226474974"}]},"ts":"9223372036854775807"} 2024-12-15T01:34:34,981 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-15T01:34:34,981 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39381-0x1002752410c0003, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:34:34,981 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42663-0x1002752410c0001, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:34:34,981 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35281-0x1002752410c0002, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-15T01:34:34,981 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:34:34,981 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35281-0x1002752410c0002, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:34:34,981 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data null 2024-12-15T01:34:34,981 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data null 2024-12-15T01:34:34,982 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(135): Skipping permission cache refresh because writable data is empty 2024-12-15T01:34:34,982 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(135): Skipping permission cache refresh because writable data is empty 2024-12-15T01:34:34,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-15T01:34:34,983 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-15T01:34:34,983 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 41dbec9b53201b07e62c7c16b8e37cc5, NAME => 'testExportWithResetTtl,,1734226455678.41dbec9b53201b07e62c7c16b8e37cc5.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => e6b4d731ac38c5f7608d8bba9e49c311, NAME => 'testExportWithResetTtl,1,1734226455678.e6b4d731ac38c5f7608d8bba9e49c311.', STARTKEY => '1', ENDKEY => ''}] 2024-12-15T01:34:34,983 DEBUG [PEWorker-1 {}] 
procedure.DeleteTableProcedure(402): Marking 'testExportWithResetTtl' as deleted. 2024-12-15T01:34:34,983 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-15T01:34:34,983 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-15T01:34:34,983 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734226474983"}]},"ts":"9223372036854775807"} 2024-12-15T01:34:34,984 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-15T01:34:34,984 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-15T01:34:34,989 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1678): Deleted table testExportWithResetTtl state from META 2024-12-15T01:34:35,008 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=55, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-15T01:34:35,009 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=55, state=SUCCESS; DeleteTableProcedure table=testExportWithResetTtl in 84 msec 2024-12-15T01:34:35,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-15T01:34:35,085 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testExportWithResetTtl, procId: 55 completed 2024-12-15T01:34:35,086 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportWithResetTtl 2024-12-15T01:34:35,086 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportWithResetTtl 2024-12-15T01:34:35,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] procedure2.ProcedureExecutor(1098): Stored pid=56, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportWithResetTtl 2024-12-15T01:34:35,089 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734226475089"}]},"ts":"1734226475089"} 2024-12-15T01:34:35,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=56 2024-12-15T01:34:35,090 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithResetTtl, state=DISABLING in hbase:meta 2024-12-15T01:34:35,098 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): 
Set testtb-testExportWithResetTtl to state=DISABLING 2024-12-15T01:34:35,099 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=57, ppid=56, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportWithResetTtl}] 2024-12-15T01:34:35,101 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=58, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=ad54665aa248be3194f85293da98468c, UNASSIGN}, {pid=59, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=31ee6c3058cfcc8af5576ab0cafa36c1, UNASSIGN}] 2024-12-15T01:34:35,102 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=59, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=31ee6c3058cfcc8af5576ab0cafa36c1, UNASSIGN 2024-12-15T01:34:35,102 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=58, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=ad54665aa248be3194f85293da98468c, UNASSIGN 2024-12-15T01:34:35,103 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=58 updating hbase:meta row=ad54665aa248be3194f85293da98468c, regionState=CLOSING, regionLocation=66d575e28c61,39381,1734226422872 2024-12-15T01:34:35,103 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=59 updating hbase:meta row=31ee6c3058cfcc8af5576ab0cafa36c1, regionState=CLOSING, regionLocation=66d575e28c61,35281,1734226422799 2024-12-15T01:34:35,104 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-15T01:34:35,105 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=60, ppid=59, state=RUNNABLE; CloseRegionProcedure 31ee6c3058cfcc8af5576ab0cafa36c1, server=66d575e28c61,35281,1734226422799}] 2024-12-15T01:34:35,105 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-15T01:34:35,106 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=61, ppid=58, state=RUNNABLE; CloseRegionProcedure ad54665aa248be3194f85293da98468c, server=66d575e28c61,39381,1734226422872}] 2024-12-15T01:34:35,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=56 2024-12-15T01:34:35,256 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 66d575e28c61,35281,1734226422799 2024-12-15T01:34:35,257 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(124): Close 31ee6c3058cfcc8af5576ab0cafa36c1 2024-12-15T01:34:35,257 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-15T01:34:35,258 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 66d575e28c61,39381,1734226422872 2024-12-15T01:34:35,258 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1681): 
Closing 31ee6c3058cfcc8af5576ab0cafa36c1, disabling compactions & flushes 2024-12-15T01:34:35,258 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1703): Closing region testtb-testExportWithResetTtl,1,1734226454312.31ee6c3058cfcc8af5576ab0cafa36c1. 2024-12-15T01:34:35,258 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithResetTtl,1,1734226454312.31ee6c3058cfcc8af5576ab0cafa36c1. 2024-12-15T01:34:35,258 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithResetTtl,1,1734226454312.31ee6c3058cfcc8af5576ab0cafa36c1. after waiting 0 ms 2024-12-15T01:34:35,258 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithResetTtl,1,1734226454312.31ee6c3058cfcc8af5576ab0cafa36c1. 2024-12-15T01:34:35,258 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(124): Close ad54665aa248be3194f85293da98468c 2024-12-15T01:34:35,258 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-15T01:34:35,258 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1681): Closing ad54665aa248be3194f85293da98468c, disabling compactions & flushes 2024-12-15T01:34:35,258 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1703): Closing region testtb-testExportWithResetTtl,,1734226454312.ad54665aa248be3194f85293da98468c. 2024-12-15T01:34:35,258 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithResetTtl,,1734226454312.ad54665aa248be3194f85293da98468c. 2024-12-15T01:34:35,258 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithResetTtl,,1734226454312.ad54665aa248be3194f85293da98468c. after waiting 0 ms 2024-12-15T01:34:35,258 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithResetTtl,,1734226454312.ad54665aa248be3194f85293da98468c. 
2024-12-15T01:34:35,263 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithResetTtl/31ee6c3058cfcc8af5576ab0cafa36c1/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-15T01:34:35,264 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-15T01:34:35,264 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1922): Closed testtb-testExportWithResetTtl,1,1734226454312.31ee6c3058cfcc8af5576ab0cafa36c1. 2024-12-15T01:34:35,264 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1635): Region close journal for 31ee6c3058cfcc8af5576ab0cafa36c1: 2024-12-15T01:34:35,266 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(170): Closed 31ee6c3058cfcc8af5576ab0cafa36c1 2024-12-15T01:34:35,266 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=59 updating hbase:meta row=31ee6c3058cfcc8af5576ab0cafa36c1, regionState=CLOSED 2024-12-15T01:34:35,268 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithResetTtl/ad54665aa248be3194f85293da98468c/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-15T01:34:35,269 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-15T01:34:35,269 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1922): Closed testtb-testExportWithResetTtl,,1734226454312.ad54665aa248be3194f85293da98468c. 
2024-12-15T01:34:35,269 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1635): Region close journal for ad54665aa248be3194f85293da98468c: 2024-12-15T01:34:35,273 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(170): Closed ad54665aa248be3194f85293da98468c 2024-12-15T01:34:35,274 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=60, resume processing ppid=59 2024-12-15T01:34:35,274 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=60, ppid=59, state=SUCCESS; CloseRegionProcedure 31ee6c3058cfcc8af5576ab0cafa36c1, server=66d575e28c61,35281,1734226422799 in 164 msec 2024-12-15T01:34:35,274 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=58 updating hbase:meta row=ad54665aa248be3194f85293da98468c, regionState=CLOSED 2024-12-15T01:34:35,275 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=59, ppid=57, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=31ee6c3058cfcc8af5576ab0cafa36c1, UNASSIGN in 173 msec 2024-12-15T01:34:35,278 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=61, resume processing ppid=58 2024-12-15T01:34:35,278 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=61, ppid=58, state=SUCCESS; CloseRegionProcedure ad54665aa248be3194f85293da98468c, server=66d575e28c61,39381,1734226422872 in 170 msec 2024-12-15T01:34:35,282 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=58, resume processing ppid=57 2024-12-15T01:34:35,282 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=58, ppid=57, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=ad54665aa248be3194f85293da98468c, UNASSIGN in 177 msec 2024-12-15T01:34:35,292 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=57, resume processing ppid=56 2024-12-15T01:34:35,292 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=57, ppid=56, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportWithResetTtl in 184 msec 2024-12-15T01:34:35,294 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734226475294"}]},"ts":"1734226475294"} 2024-12-15T01:34:35,296 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithResetTtl, state=DISABLED in hbase:meta 2024-12-15T01:34:35,307 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithResetTtl to state=DISABLED 2024-12-15T01:34:35,312 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=56, state=SUCCESS; DisableTableProcedure table=testtb-testExportWithResetTtl in 225 msec 2024-12-15T01:34:35,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=56 2024-12-15T01:34:35,392 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportWithResetTtl, procId: 56 completed 2024-12-15T01:34:35,392 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportWithResetTtl 2024-12-15T01:34:35,393 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] procedure2.ProcedureExecutor(1098): Stored pid=62, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-15T01:34:35,394 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=62, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-15T01:34:35,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportWithResetTtl 2024-12-15T01:34:35,395 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=62, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-15T01:34:35,396 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35281 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportWithResetTtl 2024-12-15T01:34:35,399 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithResetTtl/31ee6c3058cfcc8af5576ab0cafa36c1 2024-12-15T01:34:35,399 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithResetTtl/ad54665aa248be3194f85293da98468c 2024-12-15T01:34:35,401 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithResetTtl/ad54665aa248be3194f85293da98468c/cf, FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithResetTtl/ad54665aa248be3194f85293da98468c/recovered.edits] 2024-12-15T01:34:35,401 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithResetTtl/31ee6c3058cfcc8af5576ab0cafa36c1/cf, FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithResetTtl/31ee6c3058cfcc8af5576ab0cafa36c1/recovered.edits] 2024-12-15T01:34:35,405 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithResetTtl/ad54665aa248be3194f85293da98468c/cf/8f8512e5fad74358b6b68563875444c4 to hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/archive/data/default/testtb-testExportWithResetTtl/ad54665aa248be3194f85293da98468c/cf/8f8512e5fad74358b6b68563875444c4 2024-12-15T01:34:35,406 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithResetTtl/31ee6c3058cfcc8af5576ab0cafa36c1/cf/bb9eaba06b1d416486a4381d72beef8a to hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/archive/data/default/testtb-testExportWithResetTtl/31ee6c3058cfcc8af5576ab0cafa36c1/cf/bb9eaba06b1d416486a4381d72beef8a 2024-12-15T01:34:35,409 DEBUG [HFileArchiver-5 {}] 
backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithResetTtl/31ee6c3058cfcc8af5576ab0cafa36c1/recovered.edits/9.seqid to hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/archive/data/default/testtb-testExportWithResetTtl/31ee6c3058cfcc8af5576ab0cafa36c1/recovered.edits/9.seqid 2024-12-15T01:34:35,409 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithResetTtl/ad54665aa248be3194f85293da98468c/recovered.edits/9.seqid to hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/archive/data/default/testtb-testExportWithResetTtl/ad54665aa248be3194f85293da98468c/recovered.edits/9.seqid 2024-12-15T01:34:35,409 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithResetTtl/31ee6c3058cfcc8af5576ab0cafa36c1 2024-12-15T01:34:35,410 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithResetTtl/ad54665aa248be3194f85293da98468c 2024-12-15T01:34:35,410 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithResetTtl regions 2024-12-15T01:34:35,413 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=62, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-15T01:34:35,415 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35281-0x1002752410c0002, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-15T01:34:35,415 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42663-0x1002752410c0001, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-15T01:34:35,415 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39381-0x1002752410c0003, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-15T01:34:35,415 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-15T01:34:35,417 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-12-15T01:34:35,418 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportWithResetTtl from hbase:meta 2024-12-15T01:34:35,421 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportWithResetTtl' descriptor. 
2024-12-15T01:34:35,423 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=62, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-15T01:34:35,423 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportWithResetTtl' from region states. 2024-12-15T01:34:35,423 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42663-0x1002752410c0001, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-15T01:34:35,423 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42663-0x1002752410c0001, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:34:35,424 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35281-0x1002752410c0002, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:34:35,424 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:34:35,424 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl,,1734226454312.ad54665aa248be3194f85293da98468c.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734226475423"}]},"ts":"9223372036854775807"} 2024-12-15T01:34:35,424 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data null 2024-12-15T01:34:35,424 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(135): Skipping permission cache refresh because writable data is empty 2024-12-15T01:34:35,424 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data null 2024-12-15T01:34:35,424 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39381-0x1002752410c0003, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:34:35,424 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(135): Skipping permission cache refresh because writable data is empty 2024-12-15T01:34:35,424 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data null 2024-12-15T01:34:35,424 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl,1,1734226454312.31ee6c3058cfcc8af5576ab0cafa36c1.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734226475423"}]},"ts":"9223372036854775807"} 2024-12-15T01:34:35,424 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(135): Skipping permission cache refresh because writable data is empty 2024-12-15T01:34:35,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=62 2024-12-15T01:34:35,427 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 
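The DeleteTableProcedure above (pid=62) runs on the master in response to an ordinary client drop. A hedged sketch of the client side, assuming the standard HBase 2.x Admin API and a default connection; the snapshot deletions that appear a little further down map to Admin.deleteSnapshot(String) in the same API:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();      // reads hbase-site.xml from the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("testtb-testExportWithResetTtl");
          if (admin.tableExists(tn)) {
            if (admin.isTableEnabled(tn)) {
              admin.disableTable(tn);                           // a table must be disabled before deletion
            }
            admin.deleteTable(tn);                              // triggers the DeleteTableProcedure logged above
          }
        }
      }
    }
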
2024-12-15T01:34:35,427 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => ad54665aa248be3194f85293da98468c, NAME => 'testtb-testExportWithResetTtl,,1734226454312.ad54665aa248be3194f85293da98468c.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 31ee6c3058cfcc8af5576ab0cafa36c1, NAME => 'testtb-testExportWithResetTtl,1,1734226454312.31ee6c3058cfcc8af5576ab0cafa36c1.', STARTKEY => '1', ENDKEY => ''}] 2024-12-15T01:34:35,427 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportWithResetTtl' as deleted. 2024-12-15T01:34:35,427 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734226475427"}]},"ts":"9223372036854775807"} 2024-12-15T01:34:35,429 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportWithResetTtl state from META 2024-12-15T01:34:35,441 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=62, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-15T01:34:35,442 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=62, state=SUCCESS; DeleteTableProcedure table=testtb-testExportWithResetTtl in 49 msec 2024-12-15T01:34:35,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=62 2024-12-15T01:34:35,528 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportWithResetTtl, procId: 62 completed 2024-12-15T01:34:35,540 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithResetTtl" 2024-12-15T01:34:35,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testExportWithResetTtl 2024-12-15T01:34:35,544 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb-testExportWithResetTtl" 2024-12-15T01:34:35,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb-testExportWithResetTtl 2024-12-15T01:34:35,549 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithResetTtl" 2024-12-15T01:34:35,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportWithResetTtl 2024-12-15T01:34:35,574 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportWithResetTtl Thread=795 (was 777) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-778768832_22 at /127.0.0.1:42422 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-2015 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) 
app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: hconnection-0x613351a8-shared-pool-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-778768832_22 at /127.0.0.1:46752 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool.commonPool-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: ApplicationMasterLauncher #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (309020234) connection to localhost/127.0.0.1:45909 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-778768832_22 at /127.0.0.1:52172 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1252221765_1 at /127.0.0.1:52138 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x613351a8-shared-pool-14 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x613351a8-shared-pool-16 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x613351a8-shared-pool-12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 56333) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x613351a8-shared-pool-15 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45909 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x613351a8-shared-pool-13 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=812 (was 805) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=528 (was 475) - SystemLoadAverage LEAK? -, ProcessCount=17 (was 17), AvailableMemoryMB=4470 (was 4255) - AvailableMemoryMB LEAK? - 2024-12-15T01:34:35,574 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=795 is superior to 500 2024-12-15T01:34:35,593 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportFileSystemState Thread=795, OpenFileDescriptor=812, MaxFileDescriptor=1048576, SystemLoadAverage=528, ProcessCount=17, AvailableMemoryMB=4470 2024-12-15T01:34:35,593 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=795 is superior to 500 2024-12-15T01:34:35,595 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-15T01:34:35,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] procedure2.ProcedureExecutor(1098): Stored pid=63, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportFileSystemState 2024-12-15T01:34:35,597 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_PRE_OPERATION 2024-12-15T01:34:35,598 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T01:34:35,598 INFO 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemState" procId is: 63 2024-12-15T01:34:35,599 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-15T01:34:35,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-15T01:34:35,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741961_1137 (size=407) 2024-12-15T01:34:35,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741961_1137 (size=407) 2024-12-15T01:34:35,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741961_1137 (size=407) 2024-12-15T01:34:35,614 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => e44dc9a5390c0a1e71cd37627eb0e45a, NAME => 'testtb-testExportFileSystemState,,1734226475595.e44dc9a5390c0a1e71cd37627eb0e45a.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b 2024-12-15T01:34:35,621 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => 25307496659a6b1dcf2983063379568b, NAME => 'testtb-testExportFileSystemState,1,1734226475595.25307496659a6b1dcf2983063379568b.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b 2024-12-15T01:34:35,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741962_1138 (size=68) 2024-12-15T01:34:35,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741962_1138 (size=68) 2024-12-15T01:34:35,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741962_1138 (size=68) 2024-12-15T01:34:35,643 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] 
regionserver.HRegion(894): Instantiated testtb-testExportFileSystemState,,1734226475595.e44dc9a5390c0a1e71cd37627eb0e45a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T01:34:35,643 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1681): Closing e44dc9a5390c0a1e71cd37627eb0e45a, disabling compactions & flushes 2024-12-15T01:34:35,643 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemState,,1734226475595.e44dc9a5390c0a1e71cd37627eb0e45a. 2024-12-15T01:34:35,643 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemState,,1734226475595.e44dc9a5390c0a1e71cd37627eb0e45a. 2024-12-15T01:34:35,643 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemState,,1734226475595.e44dc9a5390c0a1e71cd37627eb0e45a. after waiting 0 ms 2024-12-15T01:34:35,643 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemState,,1734226475595.e44dc9a5390c0a1e71cd37627eb0e45a. 2024-12-15T01:34:35,643 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemState,,1734226475595.e44dc9a5390c0a1e71cd37627eb0e45a. 2024-12-15T01:34:35,643 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1635): Region close journal for e44dc9a5390c0a1e71cd37627eb0e45a: 2024-12-15T01:34:35,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741963_1139 (size=68) 2024-12-15T01:34:35,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741963_1139 (size=68) 2024-12-15T01:34:35,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741963_1139 (size=68) 2024-12-15T01:34:35,650 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemState,1,1734226475595.25307496659a6b1dcf2983063379568b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T01:34:35,650 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1681): Closing 25307496659a6b1dcf2983063379568b, disabling compactions & flushes 2024-12-15T01:34:35,650 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemState,1,1734226475595.25307496659a6b1dcf2983063379568b. 2024-12-15T01:34:35,650 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemState,1,1734226475595.25307496659a6b1dcf2983063379568b. 2024-12-15T01:34:35,650 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemState,1,1734226475595.25307496659a6b1dcf2983063379568b. 
after waiting 0 ms 2024-12-15T01:34:35,650 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemState,1,1734226475595.25307496659a6b1dcf2983063379568b. 2024-12-15T01:34:35,650 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemState,1,1734226475595.25307496659a6b1dcf2983063379568b. 2024-12-15T01:34:35,650 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1635): Region close journal for 25307496659a6b1dcf2983063379568b: 2024-12-15T01:34:35,651 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_ADD_TO_META 2024-12-15T01:34:35,652 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemState,,1734226475595.e44dc9a5390c0a1e71cd37627eb0e45a.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1734226475652"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734226475652"}]},"ts":"1734226475652"} 2024-12-15T01:34:35,652 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemState,1,1734226475595.25307496659a6b1dcf2983063379568b.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1734226475652"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734226475652"}]},"ts":"1734226475652"} 2024-12-15T01:34:35,655 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-15T01:34:35,656 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-15T01:34:35,657 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734226475657"}]},"ts":"1734226475657"} 2024-12-15T01:34:35,659 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemState, state=ENABLING in hbase:meta 2024-12-15T01:34:35,677 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(202): Hosts are {66d575e28c61=0} racks are {/default-rack=0} 2024-12-15T01:34:35,679 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-15T01:34:35,679 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-15T01:34:35,679 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-15T01:34:35,679 INFO [PEWorker-1 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-15T01:34:35,679 INFO [PEWorker-1 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-15T01:34:35,679 INFO [PEWorker-1 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-15T01:34:35,679 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-15T01:34:35,679 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, 
region=e44dc9a5390c0a1e71cd37627eb0e45a, ASSIGN}, {pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=25307496659a6b1dcf2983063379568b, ASSIGN}] 2024-12-15T01:34:35,681 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=25307496659a6b1dcf2983063379568b, ASSIGN 2024-12-15T01:34:35,681 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=e44dc9a5390c0a1e71cd37627eb0e45a, ASSIGN 2024-12-15T01:34:35,682 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=25307496659a6b1dcf2983063379568b, ASSIGN; state=OFFLINE, location=66d575e28c61,42663,1734226422715; forceNewPlan=false, retain=false 2024-12-15T01:34:35,682 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=e44dc9a5390c0a1e71cd37627eb0e45a, ASSIGN; state=OFFLINE, location=66d575e28c61,35281,1734226422799; forceNewPlan=false, retain=false 2024-12-15T01:34:35,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-15T01:34:35,832 INFO [66d575e28c61:40789 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-15T01:34:35,832 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=65 updating hbase:meta row=25307496659a6b1dcf2983063379568b, regionState=OPENING, regionLocation=66d575e28c61,42663,1734226422715 2024-12-15T01:34:35,832 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=64 updating hbase:meta row=e44dc9a5390c0a1e71cd37627eb0e45a, regionState=OPENING, regionLocation=66d575e28c61,35281,1734226422799 2024-12-15T01:34:35,835 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=66, ppid=65, state=RUNNABLE; OpenRegionProcedure 25307496659a6b1dcf2983063379568b, server=66d575e28c61,42663,1734226422715}] 2024-12-15T01:34:35,836 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=67, ppid=64, state=RUNNABLE; OpenRegionProcedure e44dc9a5390c0a1e71cd37627eb0e45a, server=66d575e28c61,35281,1734226422799}] 2024-12-15T01:34:35,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-15T01:34:35,988 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 66d575e28c61,42663,1734226422715 2024-12-15T01:34:35,988 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 66d575e28c61,35281,1734226422799 2024-12-15T01:34:35,991 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemState,1,1734226475595.25307496659a6b1dcf2983063379568b. 
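The create request logged earlier for 'testtb-testExportFileSystemState' (a single 'cf' family, REGION_REPLICATION => '1', and two regions split at row key '1') can be expressed with the builder-style client API. A sketch assuming an Admin handle obtained as in the previous example; every table, family, and split-key name is taken from the log:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateExportTableSketch {
      static void create(Admin admin) throws java.io.IOException {
        TableDescriptor desc = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("testtb-testExportFileSystemState"))
            .setRegionReplication(1)                            // REGION_REPLICATION => '1'
            .setColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("cf"))
                .setMaxVersions(1)                              // VERSIONS => '1'
                .build())
            .build();
        byte[][] splitKeys = { Bytes.toBytes("1") };            // yields the regions ['', '1') and ['1', '')
        admin.createTable(desc, splitKeys);                     // drives CreateTableProcedure pid=63 above
      }
    }
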
2024-12-15T01:34:35,991 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7285): Opening region: {ENCODED => 25307496659a6b1dcf2983063379568b, NAME => 'testtb-testExportFileSystemState,1,1734226475595.25307496659a6b1dcf2983063379568b.', STARTKEY => '1', ENDKEY => ''} 2024-12-15T01:34:35,991 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemState,,1734226475595.e44dc9a5390c0a1e71cd37627eb0e45a. 2024-12-15T01:34:35,992 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7285): Opening region: {ENCODED => e44dc9a5390c0a1e71cd37627eb0e45a, NAME => 'testtb-testExportFileSystemState,,1734226475595.e44dc9a5390c0a1e71cd37627eb0e45a.', STARTKEY => '', ENDKEY => '1'} 2024-12-15T01:34:35,992 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemState,1,1734226475595.25307496659a6b1dcf2983063379568b. service=AccessControlService 2024-12-15T01:34:35,992 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemState,,1734226475595.e44dc9a5390c0a1e71cd37627eb0e45a. service=AccessControlService 2024-12-15T01:34:35,992 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-15T01:34:35,992 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
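The AccessControlService registrations and "System coprocessor ... AccessController loaded" lines above reflect a cluster started with HBase's access-control coprocessors enabled. A sketch of the corresponding configuration using the standard documented property names; in a real deployment these normally live in hbase-site.xml, and setting them programmatically here is only an assumption for illustration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class AccessControlConfSketch {
      static Configuration secureConf() {
        Configuration conf = HBaseConfiguration.create();
        // Turn on authorization and register the AccessController everywhere it is needed.
        conf.setBoolean("hbase.security.authorization", true);
        String ac = "org.apache.hadoop.hbase.security.access.AccessController";
        conf.set("hbase.coprocessor.master.classes", ac);
        conf.set("hbase.coprocessor.region.classes", ac);
        conf.set("hbase.coprocessor.regionserver.classes", ac);
        return conf;
      }
    }
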
2024-12-15T01:34:35,992 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemState 25307496659a6b1dcf2983063379568b 2024-12-15T01:34:35,992 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemState e44dc9a5390c0a1e71cd37627eb0e45a 2024-12-15T01:34:35,992 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemState,1,1734226475595.25307496659a6b1dcf2983063379568b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T01:34:35,992 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemState,,1734226475595.e44dc9a5390c0a1e71cd37627eb0e45a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T01:34:35,992 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7327): checking encryption for e44dc9a5390c0a1e71cd37627eb0e45a 2024-12-15T01:34:35,992 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7327): checking encryption for 25307496659a6b1dcf2983063379568b 2024-12-15T01:34:35,992 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7330): checking classloading for 25307496659a6b1dcf2983063379568b 2024-12-15T01:34:35,992 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7330): checking classloading for e44dc9a5390c0a1e71cd37627eb0e45a 2024-12-15T01:34:35,994 INFO [StoreOpener-e44dc9a5390c0a1e71cd37627eb0e45a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region e44dc9a5390c0a1e71cd37627eb0e45a 2024-12-15T01:34:35,994 INFO [StoreOpener-25307496659a6b1dcf2983063379568b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 25307496659a6b1dcf2983063379568b 2024-12-15T01:34:35,995 INFO [StoreOpener-e44dc9a5390c0a1e71cd37627eb0e45a-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 
e44dc9a5390c0a1e71cd37627eb0e45a columnFamilyName cf 2024-12-15T01:34:35,995 INFO [StoreOpener-25307496659a6b1dcf2983063379568b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 25307496659a6b1dcf2983063379568b columnFamilyName cf 2024-12-15T01:34:35,996 DEBUG [StoreOpener-e44dc9a5390c0a1e71cd37627eb0e45a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T01:34:35,996 DEBUG [StoreOpener-25307496659a6b1dcf2983063379568b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T01:34:35,996 INFO [StoreOpener-e44dc9a5390c0a1e71cd37627eb0e45a-1 {}] regionserver.HStore(327): Store=e44dc9a5390c0a1e71cd37627eb0e45a/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-15T01:34:35,996 INFO [StoreOpener-25307496659a6b1dcf2983063379568b-1 {}] regionserver.HStore(327): Store=25307496659a6b1dcf2983063379568b/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-15T01:34:35,997 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemState/25307496659a6b1dcf2983063379568b 2024-12-15T01:34:35,997 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemState/e44dc9a5390c0a1e71cd37627eb0e45a 2024-12-15T01:34:35,997 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemState/25307496659a6b1dcf2983063379568b 2024-12-15T01:34:35,997 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemState/e44dc9a5390c0a1e71cd37627eb0e45a 2024-12-15T01:34:35,999 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1085): writing seq id for e44dc9a5390c0a1e71cd37627eb0e45a 2024-12-15T01:34:35,999 DEBUG 
[RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1085): writing seq id for 25307496659a6b1dcf2983063379568b 2024-12-15T01:34:36,001 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemState/e44dc9a5390c0a1e71cd37627eb0e45a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-15T01:34:36,001 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemState/25307496659a6b1dcf2983063379568b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-15T01:34:36,002 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1102): Opened e44dc9a5390c0a1e71cd37627eb0e45a; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62225164, jitterRate=-0.0727728009223938}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-15T01:34:36,002 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1102): Opened 25307496659a6b1dcf2983063379568b; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73514376, jitterRate=0.09544956684112549}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-15T01:34:36,002 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1001): Region open journal for e44dc9a5390c0a1e71cd37627eb0e45a: 2024-12-15T01:34:36,002 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1001): Region open journal for 25307496659a6b1dcf2983063379568b: 2024-12-15T01:34:36,003 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemState,1,1734226475595.25307496659a6b1dcf2983063379568b., pid=66, masterSystemTime=1734226475988 2024-12-15T01:34:36,003 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemState,,1734226475595.e44dc9a5390c0a1e71cd37627eb0e45a., pid=67, masterSystemTime=1734226475988 2024-12-15T01:34:36,005 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemState,,1734226475595.e44dc9a5390c0a1e71cd37627eb0e45a. 2024-12-15T01:34:36,005 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemState,,1734226475595.e44dc9a5390c0a1e71cd37627eb0e45a. 
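With both regions of testtb-testExportFileSystemState now opened on 66d575e28c61,35281 and 66d575e28c61,42663, the same assignment can be observed from a client through a RegionLocator. A sketch assuming the standard HBase 2.x API and a Connection pointed at this test cluster:

    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RegionLocationSketch {
      static void printAssignments(Connection conn) throws java.io.IOException {
        TableName tn = TableName.valueOf("testtb-testExportFileSystemState");
        try (RegionLocator locator = conn.getRegionLocator(tn)) {
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            // Expect two entries: start keys '' and '1', matching the assignments in this log.
            System.out.println(Bytes.toStringBinary(loc.getRegion().getStartKey())
                + " -> " + loc.getServerName());
          }
        }
      }
    }
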
2024-12-15T01:34:36,005 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=64 updating hbase:meta row=e44dc9a5390c0a1e71cd37627eb0e45a, regionState=OPEN, openSeqNum=2, regionLocation=66d575e28c61,35281,1734226422799 2024-12-15T01:34:36,005 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemState,1,1734226475595.25307496659a6b1dcf2983063379568b. 2024-12-15T01:34:36,005 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemState,1,1734226475595.25307496659a6b1dcf2983063379568b. 2024-12-15T01:34:36,006 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=65 updating hbase:meta row=25307496659a6b1dcf2983063379568b, regionState=OPEN, openSeqNum=2, regionLocation=66d575e28c61,42663,1734226422715 2024-12-15T01:34:36,009 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=67, resume processing ppid=64 2024-12-15T01:34:36,009 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=67, ppid=64, state=SUCCESS; OpenRegionProcedure e44dc9a5390c0a1e71cd37627eb0e45a, server=66d575e28c61,35281,1734226422799 in 171 msec 2024-12-15T01:34:36,009 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=66, resume processing ppid=65 2024-12-15T01:34:36,010 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=66, ppid=65, state=SUCCESS; OpenRegionProcedure 25307496659a6b1dcf2983063379568b, server=66d575e28c61,42663,1734226422715 in 174 msec 2024-12-15T01:34:36,010 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=64, ppid=63, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=e44dc9a5390c0a1e71cd37627eb0e45a, ASSIGN in 330 msec 2024-12-15T01:34:36,011 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=65, resume processing ppid=63 2024-12-15T01:34:36,011 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=65, ppid=63, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=25307496659a6b1dcf2983063379568b, ASSIGN in 331 msec 2024-12-15T01:34:36,011 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-15T01:34:36,012 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734226476011"}]},"ts":"1734226476011"} 2024-12-15T01:34:36,013 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemState, state=ENABLED in hbase:meta 2024-12-15T01:34:36,058 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_POST_OPERATION 2024-12-15T01:34:36,058 DEBUG [PEWorker-4 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportFileSystemState jenkins: RWXCA 2024-12-15T01:34:36,060 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35281 {}] access.PermissionStorage(611): Read acl: 
entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-12-15T01:34:36,069 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35281-0x1002752410c0002, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:34:36,069 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42663-0x1002752410c0001, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:34:36,069 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:34:36,069 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39381-0x1002752410c0003, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:34:36,078 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-15T01:34:36,078 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-15T01:34:36,078 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-15T01:34:36,078 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-15T01:34:36,079 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=63, state=SUCCESS; CreateTableProcedure table=testtb-testExportFileSystemState in 482 msec 2024-12-15T01:34:36,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-15T01:34:36,203 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportFileSystemState, procId: 63 completed 2024-12-15T01:34:36,203 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testExportFileSystemState get assigned. Timeout = 60000ms 2024-12-15T01:34:36,203 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-15T01:34:36,208 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testExportFileSystemState assigned to meta. Checking AM states. 
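The PermissionStorage and ZKPermissionWatcher entries above record the owner ACL jenkins: RWXCA being written for the new table and pushed to every region server via the /hbase/acl znode. In this run the entry is written by the create-table path itself, but an equivalent grant can also be issued explicitly; a hedged sketch using AccessControlClient (assumes the AccessController coprocessor is enabled, as it is in this secured test setup):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.Permission;

    public class GrantTablePermissions {
      public static void main(String[] args) throws Throwable {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
          // Grant Read/Write/eXec/Create/Admin on the whole table (family and qualifier null),
          // mirroring the "jenkins: RWXCA" entry the log shows under /hbase/acl.
          AccessControlClient.grant(conn, TableName.valueOf("testtb-testExportFileSystemState"),
              "jenkins", null, null,
              Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
              Permission.Action.CREATE, Permission.Action.ADMIN);
        }
      }
    }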
2024-12-15T01:34:36,208 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-15T01:34:36,208 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testExportFileSystemState assigned. 2024-12-15T01:34:36,213 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-15T01:34:36,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734226476213 (current time:1734226476213). 2024-12-15T01:34:36,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-15T01:34:36,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testExportFileSystemState VERSION not specified, setting to 2 2024-12-15T01:34:36,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-15T01:34:36,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x289b9127 to 127.0.0.1:64248 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1d630f8f 2024-12-15T01:34:36,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1ec9f430, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T01:34:36,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T01:34:36,227 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54530, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T01:34:36,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x289b9127 to 127.0.0.1:64248 2024-12-15T01:34:36,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T01:34:36,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2fd22af6 to 127.0.0.1:64248 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3ffa8d6d 2024-12-15T01:34:36,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e4396d6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T01:34:36,247 DEBUG [hconnection-0x69ef2708-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for 
service=ClientService, sasl=false 2024-12-15T01:34:36,248 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54546, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T01:34:36,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T01:34:36,251 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54550, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T01:34:36,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2fd22af6 to 127.0.0.1:64248 2024-12-15T01:34:36,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T01:34:36,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-12-15T01:34:36,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-15T01:34:36,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] procedure2.ProcedureExecutor(1098): Stored pid=68, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-15T01:34:36,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 68 2024-12-15T01:34:36,255 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-15T01:34:36,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-12-15T01:34:36,256 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-15T01:34:36,258 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-15T01:34:36,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741964_1140 (size=170) 2024-12-15T01:34:36,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741964_1140 (size=170) 2024-12-15T01:34:36,268 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741964_1140 (size=170) 2024-12-15T01:34:36,269 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-15T01:34:36,270 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=69, ppid=68, state=RUNNABLE; SnapshotRegionProcedure e44dc9a5390c0a1e71cd37627eb0e45a}, {pid=70, ppid=68, state=RUNNABLE; SnapshotRegionProcedure 25307496659a6b1dcf2983063379568b}] 2024-12-15T01:34:36,271 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=70, ppid=68, state=RUNNABLE; SnapshotRegionProcedure 25307496659a6b1dcf2983063379568b 2024-12-15T01:34:36,271 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=69, ppid=68, state=RUNNABLE; SnapshotRegionProcedure e44dc9a5390c0a1e71cd37627eb0e45a 2024-12-15T01:34:36,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-12-15T01:34:36,422 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 66d575e28c61,42663,1734226422715 2024-12-15T01:34:36,422 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 66d575e28c61,35281,1734226422799 2024-12-15T01:34:36,422 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35281 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=69 2024-12-15T01:34:36,422 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42663 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=70 2024-12-15T01:34:36,423 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,,1734226475595.e44dc9a5390c0a1e71cd37627eb0e45a. 2024-12-15T01:34:36,423 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,1,1734226475595.25307496659a6b1dcf2983063379568b. 2024-12-15T01:34:36,423 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.HRegion(2538): Flush status journal for e44dc9a5390c0a1e71cd37627eb0e45a: 2024-12-15T01:34:36,423 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,,1734226475595.e44dc9a5390c0a1e71cd37627eb0e45a. for emptySnaptb0-testExportFileSystemState completed. 
2024-12-15T01:34:36,423 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.HRegion(2538): Flush status journal for 25307496659a6b1dcf2983063379568b: 2024-12-15T01:34:36,423 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,1,1734226475595.25307496659a6b1dcf2983063379568b. for emptySnaptb0-testExportFileSystemState completed. 2024-12-15T01:34:36,423 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,,1734226475595.e44dc9a5390c0a1e71cd37627eb0e45a.' region-info for snapshot=emptySnaptb0-testExportFileSystemState 2024-12-15T01:34:36,423 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,1,1734226475595.25307496659a6b1dcf2983063379568b.' region-info for snapshot=emptySnaptb0-testExportFileSystemState 2024-12-15T01:34:36,423 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-15T01:34:36,423 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-15T01:34:36,423 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-15T01:34:36,423 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-15T01:34:36,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741966_1142 (size=71) 2024-12-15T01:34:36,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741966_1142 (size=71) 2024-12-15T01:34:36,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741965_1141 (size=71) 2024-12-15T01:34:36,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741966_1142 (size=71) 2024-12-15T01:34:36,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741965_1141 (size=71) 2024-12-15T01:34:36,432 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,,1734226475595.e44dc9a5390c0a1e71cd37627eb0e45a. 
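The SnapshotProcedure (pid=68) and its per-region SnapshotRegionProcedure children shown here are the master-side half of a client snapshot request; the MasterRpcServices(1703) entry earlier shows that request arriving with type=FLUSH and ttl=0. A minimal sketch of the client call that drives this flow:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class TakeFlushSnapshot {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // A FLUSH-type snapshot, matching "type=FLUSH" in the snapshot description above.
          admin.snapshot("emptySnaptb0-testExportFileSystemState",
              TableName.valueOf("testtb-testExportFileSystemState"),
              SnapshotType.FLUSH);
        }
      }
    }

Because the regions are still empty at this point, each region's flush status journal has nothing to write out and the manifest records no store files, matching the "Adding snapshot references for [] hfiles" lines above.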
2024-12-15T01:34:36,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741965_1141 (size=71) 2024-12-15T01:34:36,433 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=69 2024-12-15T01:34:36,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.HMaster(4106): Remote procedure done, pid=69 2024-12-15T01:34:36,433 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemState on region e44dc9a5390c0a1e71cd37627eb0e45a 2024-12-15T01:34:36,433 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=69, ppid=68, state=RUNNABLE; SnapshotRegionProcedure e44dc9a5390c0a1e71cd37627eb0e45a 2024-12-15T01:34:36,436 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,1,1734226475595.25307496659a6b1dcf2983063379568b. 2024-12-15T01:34:36,436 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=70 2024-12-15T01:34:36,436 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=69, ppid=68, state=SUCCESS; SnapshotRegionProcedure e44dc9a5390c0a1e71cd37627eb0e45a in 165 msec 2024-12-15T01:34:36,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.HMaster(4106): Remote procedure done, pid=70 2024-12-15T01:34:36,437 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemState on region 25307496659a6b1dcf2983063379568b 2024-12-15T01:34:36,438 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=70, ppid=68, state=RUNNABLE; SnapshotRegionProcedure 25307496659a6b1dcf2983063379568b 2024-12-15T01:34:36,440 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=70, resume processing ppid=68 2024-12-15T01:34:36,440 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-15T01:34:36,440 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=70, ppid=68, state=SUCCESS; SnapshotRegionProcedure 25307496659a6b1dcf2983063379568b in 168 msec 2024-12-15T01:34:36,440 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-15T01:34:36,441 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState 
table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-15T01:34:36,441 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemState 2024-12-15T01:34:36,442 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemState 2024-12-15T01:34:36,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741967_1143 (size=552) 2024-12-15T01:34:36,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741967_1143 (size=552) 2024-12-15T01:34:36,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741967_1143 (size=552) 2024-12-15T01:34:36,461 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-15T01:34:36,468 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-15T01:34:36,468 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemState to hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/emptySnaptb0-testExportFileSystemState 2024-12-15T01:34:36,470 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-15T01:34:36,470 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 68 2024-12-15T01:34:36,471 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=68, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } in 217 msec 2024-12-15T01:34:36,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-12-15T01:34:36,559 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemState, procId: 68 completed 2024-12-15T01:34:36,569 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35281 {}] regionserver.HRegion(8254): writing data to 
region testtb-testExportFileSystemState,,1734226475595.e44dc9a5390c0a1e71cd37627eb0e45a. with WAL disabled. Data may be lost in the event of a crash. 2024-12-15T01:34:36,570 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42663 {}] regionserver.HRegion(8254): writing data to region testtb-testExportFileSystemState,1,1734226475595.25307496659a6b1dcf2983063379568b. with WAL disabled. Data may be lost in the event of a crash. 2024-12-15T01:34:36,575 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportFileSystemState 2024-12-15T01:34:36,575 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportFileSystemState,,1734226475595.e44dc9a5390c0a1e71cd37627eb0e45a. 2024-12-15T01:34:36,575 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-15T01:34:36,591 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-15T01:34:36,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734226476591 (current time:1734226476591). 2024-12-15T01:34:36,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-15T01:34:36,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportFileSystemState VERSION not specified, setting to 2 2024-12-15T01:34:36,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-15T01:34:36,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4ea99f4e to 127.0.0.1:64248 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5eee26f4 2024-12-15T01:34:36,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@28e62e9d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T01:34:36,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T01:34:36,605 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54548, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T01:34:36,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4ea99f4e to 127.0.0.1:64248 2024-12-15T01:34:36,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T01:34:36,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x640effc3 to 127.0.0.1:64248 with session timeout=90000ms, retries=30, retry 
interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4b78bc30 2024-12-15T01:34:36,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@52f3e104, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T01:34:36,626 DEBUG [hconnection-0x4e4d5ee4-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T01:34:36,627 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54554, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T01:34:36,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T01:34:36,630 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54558, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T01:34:36,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x640effc3 to 127.0.0.1:64248 2024-12-15T01:34:36,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T01:34:36,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-12-15T01:34:36,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 
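The HRegion(8254) warnings a few entries above record the test loading rows into both regions with the WAL disabled, which is why the log warns that data may be lost on a crash. A minimal client-side sketch of such a write, assuming the cf:q column seen in the flush output below and a hypothetical row key and value:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutWithoutWal {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("testtb-testExportFileSystemState"))) {
          Put put = new Put(Bytes.toBytes("row-0")); // hypothetical row key
          put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
          // SKIP_WAL is what produces the "Data may be lost in the event of a crash" warning.
          put.setDurability(Durability.SKIP_WAL);
          table.put(put);
        }
      }
    }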
2024-12-15T01:34:36,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] procedure2.ProcedureExecutor(1098): Stored pid=71, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-15T01:34:36,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 71 2024-12-15T01:34:36,634 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-15T01:34:36,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-15T01:34:36,634 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-15T01:34:36,637 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-15T01:34:36,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741968_1144 (size=165) 2024-12-15T01:34:36,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741968_1144 (size=165) 2024-12-15T01:34:36,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741968_1144 (size=165) 2024-12-15T01:34:36,645 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-15T01:34:36,645 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=72, ppid=71, state=RUNNABLE; SnapshotRegionProcedure e44dc9a5390c0a1e71cd37627eb0e45a}, {pid=73, ppid=71, state=RUNNABLE; SnapshotRegionProcedure 25307496659a6b1dcf2983063379568b}] 2024-12-15T01:34:36,645 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=72, ppid=71, state=RUNNABLE; SnapshotRegionProcedure e44dc9a5390c0a1e71cd37627eb0e45a 2024-12-15T01:34:36,646 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=73, ppid=71, state=RUNNABLE; SnapshotRegionProcedure 25307496659a6b1dcf2983063379568b 2024-12-15T01:34:36,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure 
is done pid=71 2024-12-15T01:34:36,797 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 66d575e28c61,42663,1734226422715 2024-12-15T01:34:36,797 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 66d575e28c61,35281,1734226422799 2024-12-15T01:34:36,798 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42663 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=73 2024-12-15T01:34:36,798 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35281 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=72 2024-12-15T01:34:36,798 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,,1734226475595.e44dc9a5390c0a1e71cd37627eb0e45a. 2024-12-15T01:34:36,798 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,1,1734226475595.25307496659a6b1dcf2983063379568b. 2024-12-15T01:34:36,798 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(2837): Flushing e44dc9a5390c0a1e71cd37627eb0e45a 1/1 column families, dataSize=400 B heapSize=1.09 KB 2024-12-15T01:34:36,798 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(2837): Flushing 25307496659a6b1dcf2983063379568b 1/1 column families, dataSize=2.87 KB heapSize=6.44 KB 2024-12-15T01:34:36,812 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemState/e44dc9a5390c0a1e71cd37627eb0e45a/.tmp/cf/c834788d195f4520a0de52b09d590d04 is 71, key is 012ae374b3bbe818123557a21d3810f0/cf:q/1734226476568/Put/seqid=0 2024-12-15T01:34:36,812 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemState/25307496659a6b1dcf2983063379568b/.tmp/cf/5bdb6afebe4b41e4ac055485d411259b is 71, key is 17c7f346726a7ed441f919895823fcb8/cf:q/1734226476570/Put/seqid=0 2024-12-15T01:34:36,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741970_1146 (size=8122) 2024-12-15T01:34:36,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741969_1145 (size=5490) 2024-12-15T01:34:36,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741970_1146 (size=8122) 2024-12-15T01:34:36,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741970_1146 (size=8122) 2024-12-15T01:34:36,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741969_1145 (size=5490) 2024-12-15T01:34:36,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741969_1145 (size=5490) 2024-12-15T01:34:36,818 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.87 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemState/25307496659a6b1dcf2983063379568b/.tmp/cf/5bdb6afebe4b41e4ac055485d411259b 2024-12-15T01:34:36,818 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=400 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemState/e44dc9a5390c0a1e71cd37627eb0e45a/.tmp/cf/c834788d195f4520a0de52b09d590d04 2024-12-15T01:34:36,824 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemState/25307496659a6b1dcf2983063379568b/.tmp/cf/5bdb6afebe4b41e4ac055485d411259b as hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemState/25307496659a6b1dcf2983063379568b/cf/5bdb6afebe4b41e4ac055485d411259b 2024-12-15T01:34:36,824 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemState/e44dc9a5390c0a1e71cd37627eb0e45a/.tmp/cf/c834788d195f4520a0de52b09d590d04 as hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemState/e44dc9a5390c0a1e71cd37627eb0e45a/cf/c834788d195f4520a0de52b09d590d04 2024-12-15T01:34:36,829 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemState/e44dc9a5390c0a1e71cd37627eb0e45a/cf/c834788d195f4520a0de52b09d590d04, entries=6, sequenceid=6, filesize=5.4 K 2024-12-15T01:34:36,830 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemState/25307496659a6b1dcf2983063379568b/cf/5bdb6afebe4b41e4ac055485d411259b, entries=44, sequenceid=6, filesize=7.9 K 2024-12-15T01:34:36,830 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(3040): Finished flush of dataSize ~400 B/400, heapSize ~1.08 KB/1104, currentSize=0 B/0 for e44dc9a5390c0a1e71cd37627eb0e45a in 32ms, sequenceid=6, compaction requested=false 2024-12-15T01:34:36,830 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 
{event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemState' 2024-12-15T01:34:36,831 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(3040): Finished flush of dataSize ~2.87 KB/2936, heapSize ~6.42 KB/6576, currentSize=0 B/0 for 25307496659a6b1dcf2983063379568b in 32ms, sequenceid=6, compaction requested=false 2024-12-15T01:34:36,831 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemState' 2024-12-15T01:34:36,831 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(2538): Flush status journal for 25307496659a6b1dcf2983063379568b: 2024-12-15T01:34:36,831 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(2538): Flush status journal for e44dc9a5390c0a1e71cd37627eb0e45a: 2024-12-15T01:34:36,831 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,1,1734226475595.25307496659a6b1dcf2983063379568b. for snaptb0-testExportFileSystemState completed. 2024-12-15T01:34:36,831 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,,1734226475595.e44dc9a5390c0a1e71cd37627eb0e45a. for snaptb0-testExportFileSystemState completed. 2024-12-15T01:34:36,831 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,1,1734226475595.25307496659a6b1dcf2983063379568b.' region-info for snapshot=snaptb0-testExportFileSystemState 2024-12-15T01:34:36,831 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,,1734226475595.e44dc9a5390c0a1e71cd37627eb0e45a.' 
region-info for snapshot=snaptb0-testExportFileSystemState 2024-12-15T01:34:36,831 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-15T01:34:36,831 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-15T01:34:36,831 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemState/25307496659a6b1dcf2983063379568b/cf/5bdb6afebe4b41e4ac055485d411259b] hfiles 2024-12-15T01:34:36,831 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemState/e44dc9a5390c0a1e71cd37627eb0e45a/cf/c834788d195f4520a0de52b09d590d04] hfiles 2024-12-15T01:34:36,831 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemState/25307496659a6b1dcf2983063379568b/cf/5bdb6afebe4b41e4ac055485d411259b for snapshot=snaptb0-testExportFileSystemState 2024-12-15T01:34:36,831 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemState/e44dc9a5390c0a1e71cd37627eb0e45a/cf/c834788d195f4520a0de52b09d590d04 for snapshot=snaptb0-testExportFileSystemState 2024-12-15T01:34:36,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741971_1147 (size=110) 2024-12-15T01:34:36,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741971_1147 (size=110) 2024-12-15T01:34:36,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741971_1147 (size=110) 2024-12-15T01:34:36,842 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,1,1734226475595.25307496659a6b1dcf2983063379568b. 
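Because snaptb0-testExportFileSystemState is a FLUSH-type snapshot, each SnapshotRegionProcedure above first flushes the region's memstore (DefaultStoreFlusher writing the .tmp store file, HRegionFileSystem committing it, HStore adding it) before referencing the resulting HFile in the snapshot manifest. The same memstore flush can be requested on its own through the Admin API; a minimal sketch:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTable {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Flush every region of the table, writing memstore contents out as HFiles
          // like the cf/... store files committed in the log above.
          admin.flush(TableName.valueOf("testtb-testExportFileSystemState"));
        }
      }
    }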
2024-12-15T01:34:36,842 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=73 2024-12-15T01:34:36,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.HMaster(4106): Remote procedure done, pid=73 2024-12-15T01:34:36,842 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemState on region 25307496659a6b1dcf2983063379568b 2024-12-15T01:34:36,842 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=73, ppid=71, state=RUNNABLE; SnapshotRegionProcedure 25307496659a6b1dcf2983063379568b 2024-12-15T01:34:36,844 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=73, ppid=71, state=SUCCESS; SnapshotRegionProcedure 25307496659a6b1dcf2983063379568b in 198 msec 2024-12-15T01:34:36,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741972_1148 (size=110) 2024-12-15T01:34:36,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741972_1148 (size=110) 2024-12-15T01:34:36,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741972_1148 (size=110) 2024-12-15T01:34:36,845 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,,1734226475595.e44dc9a5390c0a1e71cd37627eb0e45a. 2024-12-15T01:34:36,845 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=72 2024-12-15T01:34:36,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.HMaster(4106): Remote procedure done, pid=72 2024-12-15T01:34:36,846 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemState on region e44dc9a5390c0a1e71cd37627eb0e45a 2024-12-15T01:34:36,846 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=72, ppid=71, state=RUNNABLE; SnapshotRegionProcedure e44dc9a5390c0a1e71cd37627eb0e45a 2024-12-15T01:34:36,848 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=72, resume processing ppid=71 2024-12-15T01:34:36,848 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-15T01:34:36,848 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=72, ppid=71, state=SUCCESS; SnapshotRegionProcedure e44dc9a5390c0a1e71cd37627eb0e45a in 202 msec 2024-12-15T01:34:36,848 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH 
ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-15T01:34:36,849 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-15T01:34:36,849 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemState 2024-12-15T01:34:36,850 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-12-15T01:34:36,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741973_1149 (size=630) 2024-12-15T01:34:36,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741973_1149 (size=630) 2024-12-15T01:34:36,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741973_1149 (size=630) 2024-12-15T01:34:36,861 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-15T01:34:36,867 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-15T01:34:36,868 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState to hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-12-15T01:34:36,869 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-15T01:34:36,869 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 71 2024-12-15T01:34:36,870 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } in 237 msec 2024-12-15T01:34:36,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-15T01:34:36,937 INFO [Time-limited test {}] 
client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemState, procId: 71 completed 2024-12-15T01:34:36,937 INFO [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226476937 2024-12-15T01:34:36,937 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:33329, tgtDir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226476937, rawTgtDir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226476937, srcFsUri=hdfs://localhost:33329, srcDir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b 2024-12-15T01:34:36,962 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:33329, inputRoot=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b 2024-12-15T01:34:36,962 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-778768832_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226476937, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226476937/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-12-15T01:34:36,964 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 2024-12-15T01:34:36,968 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/snaptb0-testExportFileSystemState to hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226476937/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-12-15T01:34:36,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741975_1151 (size=165) 2024-12-15T01:34:36,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741974_1150 (size=630) 2024-12-15T01:34:36,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741974_1150 (size=630) 2024-12-15T01:34:36,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741974_1150 (size=630) 2024-12-15T01:34:36,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741975_1151 (size=165) 2024-12-15T01:34:36,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741975_1151 (size=165) 2024-12-15T01:34:37,127 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/hadoop-13301489937487286400.jar 2024-12-15T01:34:37,128 DEBUG 
[Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-15T01:34:37,128 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-15T01:34:37,128 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-15T01:34:37,981 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/hadoop-18272426264729231667.jar 2024-12-15T01:34:37,981 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-15T01:34:37,982 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-15T01:34:38,052 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/hadoop-2330889438348196484.jar 2024-12-15T01:34:38,053 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-15T01:34:38,053 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-15T01:34:38,054 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-15T01:34:38,054 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-15T01:34:38,054 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-15T01:34:38,055 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using 
jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-15T01:34:38,055 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-15T01:34:38,056 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-15T01:34:38,056 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-15T01:34:38,057 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-15T01:34:38,057 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-15T01:34:38,057 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-15T01:34:38,058 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-15T01:34:38,058 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-15T01:34:38,058 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-15T01:34:38,059 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-15T01:34:38,059 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-15T01:34:38,059 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-15T01:34:38,060 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-15T01:34:38,060 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-15T01:34:38,061 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-15T01:34:38,061 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-15T01:34:38,061 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-15T01:34:38,061 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-15T01:34:38,062 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-15T01:34:38,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741976_1152 (size=127628) 2024-12-15T01:34:38,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741976_1152 (size=127628) 2024-12-15T01:34:38,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741976_1152 (size=127628) 2024-12-15T01:34:38,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741977_1153 (size=2172137) 2024-12-15T01:34:38,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741977_1153 (size=2172137) 2024-12-15T01:34:38,155 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741977_1153 (size=2172137) 2024-12-15T01:34:38,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741978_1154 (size=213228) 2024-12-15T01:34:38,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741978_1154 (size=213228) 2024-12-15T01:34:38,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741978_1154 (size=213228) 2024-12-15T01:34:38,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741979_1155 (size=1877034) 2024-12-15T01:34:38,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741979_1155 (size=1877034) 2024-12-15T01:34:38,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741979_1155 (size=1877034) 2024-12-15T01:34:38,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741980_1156 (size=6350909) 2024-12-15T01:34:38,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741980_1156 (size=6350909) 2024-12-15T01:34:38,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741980_1156 (size=6350909) 2024-12-15T01:34:38,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741981_1157 (size=533455) 2024-12-15T01:34:38,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741981_1157 (size=533455) 2024-12-15T01:34:38,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741981_1157 (size=533455) 2024-12-15T01:34:38,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741982_1158 (size=7280644) 2024-12-15T01:34:38,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741982_1158 (size=7280644) 2024-12-15T01:34:38,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741982_1158 (size=7280644) 2024-12-15T01:34:38,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741983_1159 (size=4188619) 2024-12-15T01:34:38,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741983_1159 (size=4188619) 2024-12-15T01:34:38,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741983_1159 (size=4188619) 2024-12-15T01:34:38,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741984_1160 (size=20406) 2024-12-15T01:34:38,324 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741984_1160 (size=20406) 2024-12-15T01:34:38,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741984_1160 (size=20406) 2024-12-15T01:34:38,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741985_1161 (size=75495) 2024-12-15T01:34:38,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741985_1161 (size=75495) 2024-12-15T01:34:38,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741985_1161 (size=75495) 2024-12-15T01:34:38,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741986_1162 (size=45609) 2024-12-15T01:34:38,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741986_1162 (size=45609) 2024-12-15T01:34:38,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741986_1162 (size=45609) 2024-12-15T01:34:38,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741987_1163 (size=110084) 2024-12-15T01:34:38,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741987_1163 (size=110084) 2024-12-15T01:34:38,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741987_1163 (size=110084) 2024-12-15T01:34:38,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741988_1164 (size=1323991) 2024-12-15T01:34:38,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741988_1164 (size=1323991) 2024-12-15T01:34:38,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741988_1164 (size=1323991) 2024-12-15T01:34:38,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741989_1165 (size=23076) 2024-12-15T01:34:38,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741989_1165 (size=23076) 2024-12-15T01:34:38,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741989_1165 (size=23076) 2024-12-15T01:34:38,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741990_1166 (size=126803) 2024-12-15T01:34:38,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741990_1166 (size=126803) 2024-12-15T01:34:38,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741990_1166 (size=126803) 2024-12-15T01:34:38,417 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741991_1167 (size=322274) 2024-12-15T01:34:38,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741991_1167 (size=322274) 2024-12-15T01:34:38,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741991_1167 (size=322274) 2024-12-15T01:34:38,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741992_1168 (size=1832290) 2024-12-15T01:34:38,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741992_1168 (size=1832290) 2024-12-15T01:34:38,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741992_1168 (size=1832290) 2024-12-15T01:34:38,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741993_1169 (size=912094) 2024-12-15T01:34:38,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741993_1169 (size=912094) 2024-12-15T01:34:38,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741993_1169 (size=912094) 2024-12-15T01:34:38,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741994_1170 (size=30081) 2024-12-15T01:34:38,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741994_1170 (size=30081) 2024-12-15T01:34:38,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741994_1170 (size=30081) 2024-12-15T01:34:38,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741995_1171 (size=53616) 2024-12-15T01:34:38,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741995_1171 (size=53616) 2024-12-15T01:34:38,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741995_1171 (size=53616) 2024-12-15T01:34:38,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741996_1172 (size=29229) 2024-12-15T01:34:38,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741996_1172 (size=29229) 2024-12-15T01:34:38,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741996_1172 (size=29229) 2024-12-15T01:34:38,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741997_1173 (size=451756) 2024-12-15T01:34:38,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741997_1173 (size=451756) 2024-12-15T01:34:38,905 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741997_1173 (size=451756) 2024-12-15T01:34:38,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741998_1174 (size=169089) 2024-12-15T01:34:38,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741998_1174 (size=169089) 2024-12-15T01:34:38,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741998_1174 (size=169089) 2024-12-15T01:34:38,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741999_1175 (size=5175431) 2024-12-15T01:34:38,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741999_1175 (size=5175431) 2024-12-15T01:34:38,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741999_1175 (size=5175431) 2024-12-15T01:34:38,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742000_1176 (size=136454) 2024-12-15T01:34:38,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742000_1176 (size=136454) 2024-12-15T01:34:38,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742000_1176 (size=136454) 2024-12-15T01:34:39,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742001_1177 (size=3317408) 2024-12-15T01:34:39,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742001_1177 (size=3317408) 2024-12-15T01:34:39,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742001_1177 (size=3317408) 2024-12-15T01:34:39,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742002_1178 (size=503880) 2024-12-15T01:34:39,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742002_1178 (size=503880) 2024-12-15T01:34:39,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742002_1178 (size=503880) 2024-12-15T01:34:39,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742003_1179 (size=4695811) 2024-12-15T01:34:39,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742003_1179 (size=4695811) 2024-12-15T01:34:39,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742003_1179 (size=4695811) 2024-12-15T01:34:39,046 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
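The TableMapReduceUtil lines above record the dependency-jar resolution for the export job, and the JobResourceUploader warning at 01:34:39,046 is emitted because the submitted job has no job jar configured. A minimal sketch of the usual client-side setup, assuming a hypothetical driver class (ExportJobSetupSketch is not part of the test); Job#setJarByClass plus TableMapReduceUtil#addDependencyJars is the standard combination that avoids the "No job jar file set" warning:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.mapreduce.Job;

    public class ExportJobSetupSketch {  // hypothetical driver class, not from the test
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Job job = Job.getInstance(conf, "export-job-setup-sketch");
        // Naming a class from the user's jar sets the job jar and avoids the
        // "No job jar file set. User classes may not be found." warning.
        job.setJarByClass(ExportJobSetupSketch.class);
        // Attach HBase/Hadoop dependency jars to the job; this performs the
        // kind of jar resolution behind the "For class X, using jar Y" lines above.
        TableMapReduceUtil.addDependencyJars(job);
        // Mapper/input/output configuration is omitted from this sketch.
      }
    }

Here the warning appears to be benign, since the export job's classes are shipped through the dependency-jar mechanism rather than a job jar; it matters more for user jobs whose mapper classes live outside those jars.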
2024-12-15T01:34:39,049 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testExportFileSystemState' hfile list 2024-12-15T01:34:39,051 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=13.3 K 2024-12-15T01:34:39,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742004_1180 (size=344) 2024-12-15T01:34:39,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742004_1180 (size=344) 2024-12-15T01:34:39,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742004_1180 (size=344) 2024-12-15T01:34:39,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742005_1181 (size=15) 2024-12-15T01:34:39,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742005_1181 (size=15) 2024-12-15T01:34:39,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742005_1181 (size=15) 2024-12-15T01:34:39,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742006_1182 (size=304941) 2024-12-15T01:34:39,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742006_1182 (size=304941) 2024-12-15T01:34:39,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742006_1182 (size=304941) 2024-12-15T01:34:39,332 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-15T01:34:39,332 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-15T01:34:39,334 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734226430009_0002_000001 (auth:SIMPLE) from 127.0.0.1:60702 2024-12-15T01:34:39,344 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/MiniMRCluster_1118476571/yarn-659708074/MiniMRCluster_1118476571-localDir-nm-0_1/usercache/jenkins/appcache/application_1734226430009_0002/container_1734226430009_0002_01_000001/launch_container.sh] 2024-12-15T01:34:39,344 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/MiniMRCluster_1118476571/yarn-659708074/MiniMRCluster_1118476571-localDir-nm-0_1/usercache/jenkins/appcache/application_1734226430009_0002/container_1734226430009_0002_01_000001/container_tokens] 2024-12-15T01:34:39,344 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/MiniMRCluster_1118476571/yarn-659708074/MiniMRCluster_1118476571-localDir-nm-0_1/usercache/jenkins/appcache/application_1734226430009_0002/container_1734226430009_0002_01_000001/sysfs] 2024-12-15T01:34:40,091 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734226430009_0003_000001 (auth:SIMPLE) from 127.0.0.1:36428 2024-12-15T01:34:40,359 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-15T01:34:40,817 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
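The entries above show ExportSnapshot loading the snapshot's hfile list, planning a single 13.3 K split, and handing the copy off to the MiniMRCluster (the capacity-scheduler and container warnings come from that harness). Outside the test, the same export is normally driven through the ExportSnapshot tool; a minimal sketch via ToolRunner, with the snapshot name taken from the log and an illustrative -copy-to URI (the test's real target is the generated export-1734226476937 directory):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {  // hypothetical wrapper, not from the test
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Copies the snapshot manifest and referenced hfiles to the target
        // filesystem, which is what the MapReduce job traced above is doing.
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testExportFileSystemState",
            "-copy-to", "hdfs://localhost:33329/user/jenkins/export-test"  // illustrative target
        });
        System.exit(rc);
      }
    }

Options such as -mappers can bound the copy's parallelism; with the defaults, a snapshot this small ends up as the single split seen in the log.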
2024-12-15T01:34:42,294 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState 2024-12-15T01:34:42,294 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState Metrics about Tables on a single HBase RegionServer 2024-12-15T01:34:42,295 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl 2024-12-15T01:34:42,295 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl 2024-12-15T01:34:45,477 DEBUG [master/66d575e28c61:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region d0386827b59519398c69d2b9e07bf334 changed from -1.0 to 0.0, refreshing cache 2024-12-15T01:34:45,478 DEBUG [master/66d575e28c61:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region e44dc9a5390c0a1e71cd37627eb0e45a changed from -1.0 to 0.0, refreshing cache 2024-12-15T01:34:45,478 DEBUG [master/66d575e28c61:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 874e078b36e6d955b975f9e096ec95f5 changed from -1.0 to 0.0, refreshing cache 2024-12-15T01:34:45,478 DEBUG [master/66d575e28c61:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 25307496659a6b1dcf2983063379568b changed from -1.0 to 0.0, refreshing cache 2024-12-15T01:34:46,109 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734226430009_0003_000001 (auth:SIMPLE) from 127.0.0.1:45718 2024-12-15T01:34:46,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742007_1183 (size=350615) 2024-12-15T01:34:46,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742007_1183 (size=350615) 2024-12-15T01:34:46,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742007_1183 (size=350615) 2024-12-15T01:34:47,798 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-15T01:34:48,347 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734226430009_0003_000001 (auth:SIMPLE) from 127.0.0.1:37922 2024-12-15T01:34:51,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742008_1184 (size=8122) 2024-12-15T01:34:51,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742008_1184 (size=8122) 2024-12-15T01:34:51,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742008_1184 (size=8122) 2024-12-15T01:34:51,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742009_1185 (size=5490) 2024-12-15T01:34:51,850 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742009_1185 (size=5490) 2024-12-15T01:34:51,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742009_1185 (size=5490) 2024-12-15T01:34:52,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742010_1186 (size=17422) 2024-12-15T01:34:52,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742010_1186 (size=17422) 2024-12-15T01:34:52,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742010_1186 (size=17422) 2024-12-15T01:34:52,081 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/MiniMRCluster_1118476571/yarn-659708074/MiniMRCluster_1118476571-localDir-nm-0_0/usercache/jenkins/appcache/application_1734226430009_0003/container_1734226430009_0003_01_000002/launch_container.sh] 2024-12-15T01:34:52,081 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/MiniMRCluster_1118476571/yarn-659708074/MiniMRCluster_1118476571-localDir-nm-0_0/usercache/jenkins/appcache/application_1734226430009_0003/container_1734226430009_0003_01_000002/container_tokens] 2024-12-15T01:34:52,081 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/MiniMRCluster_1118476571/yarn-659708074/MiniMRCluster_1118476571-localDir-nm-0_0/usercache/jenkins/appcache/application_1734226430009_0003/container_1734226430009_0003_01_000002/sysfs] 2024-12-15T01:34:52,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742011_1187 (size=465) 2024-12-15T01:34:52,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742011_1187 (size=465) 2024-12-15T01:34:52,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742011_1187 (size=465) 2024-12-15T01:34:52,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742012_1188 (size=17422) 2024-12-15T01:34:52,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742012_1188 (size=17422) 2024-12-15T01:34:52,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742012_1188 (size=17422) 2024-12-15T01:34:52,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to 
blk_1073742013_1189 (size=350615) 2024-12-15T01:34:52,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742013_1189 (size=350615) 2024-12-15T01:34:52,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742013_1189 (size=350615) 2024-12-15T01:34:53,359 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-15T01:34:53,360 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 2024-12-15T01:34:53,371 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: snaptb0-testExportFileSystemState 2024-12-15T01:34:53,371 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-15T01:34:53,372 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-15T01:34:53,372 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-778768832_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/snaptb0-testExportFileSystemState at hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-12-15T01:34:53,373 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/snaptb0-testExportFileSystemState/.snapshotinfo 2024-12-15T01:34:53,373 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/snaptb0-testExportFileSystemState/data.manifest 2024-12-15T01:34:53,373 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-778768832_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226476937/.hbase-snapshot/snaptb0-testExportFileSystemState at hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226476937/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-12-15T01:34:53,374 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226476937/.hbase-snapshot/snaptb0-testExportFileSystemState/.snapshotinfo 2024-12-15T01:34:53,374 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226476937/.hbase-snapshot/snaptb0-testExportFileSystemState/data.manifest 2024-12-15T01:34:53,386 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportFileSystemState 2024-12-15T01:34:53,387 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemState 2024-12-15T01:34:53,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] procedure2.ProcedureExecutor(1098): Stored pid=74, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure 
table=testtb-testExportFileSystemState 2024-12-15T01:34:53,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=74 2024-12-15T01:34:53,390 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734226493390"}]},"ts":"1734226493390"} 2024-12-15T01:34:53,392 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemState, state=DISABLING in hbase:meta 2024-12-15T01:34:53,431 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemState to state=DISABLING 2024-12-15T01:34:53,432 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=75, ppid=74, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportFileSystemState}] 2024-12-15T01:34:53,434 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=76, ppid=75, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=e44dc9a5390c0a1e71cd37627eb0e45a, UNASSIGN}, {pid=77, ppid=75, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=25307496659a6b1dcf2983063379568b, UNASSIGN}] 2024-12-15T01:34:53,434 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=76, ppid=75, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=e44dc9a5390c0a1e71cd37627eb0e45a, UNASSIGN 2024-12-15T01:34:53,435 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=77, ppid=75, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=25307496659a6b1dcf2983063379568b, UNASSIGN 2024-12-15T01:34:53,435 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=76 updating hbase:meta row=e44dc9a5390c0a1e71cd37627eb0e45a, regionState=CLOSING, regionLocation=66d575e28c61,35281,1734226422799 2024-12-15T01:34:53,435 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=77 updating hbase:meta row=25307496659a6b1dcf2983063379568b, regionState=CLOSING, regionLocation=66d575e28c61,42663,1734226422715 2024-12-15T01:34:53,438 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-15T01:34:53,438 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=78, ppid=76, state=RUNNABLE; CloseRegionProcedure e44dc9a5390c0a1e71cd37627eb0e45a, server=66d575e28c61,35281,1734226422799}] 2024-12-15T01:34:53,439 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-15T01:34:53,439 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=79, ppid=77, state=RUNNABLE; CloseRegionProcedure 25307496659a6b1dcf2983063379568b, server=66d575e28c61,42663,1734226422715}] 2024-12-15T01:34:53,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=74 2024-12-15T01:34:53,590 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 
66d575e28c61,35281,1734226422799 2024-12-15T01:34:53,590 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 66d575e28c61,42663,1734226422715 2024-12-15T01:34:53,590 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] handler.UnassignRegionHandler(124): Close 25307496659a6b1dcf2983063379568b 2024-12-15T01:34:53,590 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] handler.UnassignRegionHandler(124): Close e44dc9a5390c0a1e71cd37627eb0e45a 2024-12-15T01:34:53,591 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-15T01:34:53,591 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-15T01:34:53,591 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] regionserver.HRegion(1681): Closing 25307496659a6b1dcf2983063379568b, disabling compactions & flushes 2024-12-15T01:34:53,591 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] regionserver.HRegion(1681): Closing e44dc9a5390c0a1e71cd37627eb0e45a, disabling compactions & flushes 2024-12-15T01:34:53,591 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemState,1,1734226475595.25307496659a6b1dcf2983063379568b. 2024-12-15T01:34:53,591 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemState,,1734226475595.e44dc9a5390c0a1e71cd37627eb0e45a. 2024-12-15T01:34:53,591 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemState,1,1734226475595.25307496659a6b1dcf2983063379568b. 2024-12-15T01:34:53,591 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemState,,1734226475595.e44dc9a5390c0a1e71cd37627eb0e45a. 2024-12-15T01:34:53,591 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemState,1,1734226475595.25307496659a6b1dcf2983063379568b. after waiting 0 ms 2024-12-15T01:34:53,591 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemState,,1734226475595.e44dc9a5390c0a1e71cd37627eb0e45a. after waiting 0 ms 2024-12-15T01:34:53,591 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemState,1,1734226475595.25307496659a6b1dcf2983063379568b. 2024-12-15T01:34:53,591 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemState,,1734226475595.e44dc9a5390c0a1e71cd37627eb0e45a. 
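The RS_CLOSE_REGION handlers above are the region-server side of the DISABLE request issued at 01:34:53,386; from the client, the whole sequence is one Admin call that blocks until the DisableTableProcedure finishes. A minimal sketch, assuming default client configuration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DisableTableSketch {  // hypothetical client, not from the test
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Blocks until the DisableTableProcedure and its region-close
          // subprocedures (pid=74..79 above) complete on the master.
          admin.disableTable(TableName.valueOf("testtb-testExportFileSystemState"));
        }
      }
    }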
2024-12-15T01:34:53,602 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemState/25307496659a6b1dcf2983063379568b/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-15T01:34:53,603 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-15T01:34:53,603 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemState,1,1734226475595.25307496659a6b1dcf2983063379568b. 2024-12-15T01:34:53,603 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] regionserver.HRegion(1635): Region close journal for 25307496659a6b1dcf2983063379568b: 2024-12-15T01:34:53,606 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] handler.UnassignRegionHandler(170): Closed 25307496659a6b1dcf2983063379568b 2024-12-15T01:34:53,606 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=77 updating hbase:meta row=25307496659a6b1dcf2983063379568b, regionState=CLOSED 2024-12-15T01:34:53,610 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=79, resume processing ppid=77 2024-12-15T01:34:53,610 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=79, ppid=77, state=SUCCESS; CloseRegionProcedure 25307496659a6b1dcf2983063379568b, server=66d575e28c61,42663,1734226422715 in 169 msec 2024-12-15T01:34:53,611 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=77, ppid=75, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=25307496659a6b1dcf2983063379568b, UNASSIGN in 176 msec 2024-12-15T01:34:53,619 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemState/e44dc9a5390c0a1e71cd37627eb0e45a/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-15T01:34:53,620 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-15T01:34:53,620 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemState,,1734226475595.e44dc9a5390c0a1e71cd37627eb0e45a. 
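As each region reports CLOSED the master updates hbase:meta, and once both are closed it records state=DISABLED for the table (the MetaTableAccessor entries just below). A caller can confirm that state through the public Admin API before dropping the table; a small self-contained sketch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class TableStateCheckSketch {  // hypothetical client, not from the test
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("testtb-testExportFileSystemState");
          // True once hbase:meta records state=DISABLED for the table.
          System.out.println("disabled? " + admin.isTableDisabled(tn));
        }
      }
    }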
2024-12-15T01:34:53,620 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] regionserver.HRegion(1635): Region close journal for e44dc9a5390c0a1e71cd37627eb0e45a: 2024-12-15T01:34:53,622 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] handler.UnassignRegionHandler(170): Closed e44dc9a5390c0a1e71cd37627eb0e45a 2024-12-15T01:34:53,622 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=76 updating hbase:meta row=e44dc9a5390c0a1e71cd37627eb0e45a, regionState=CLOSED 2024-12-15T01:34:53,626 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=78, resume processing ppid=76 2024-12-15T01:34:53,627 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=78, ppid=76, state=SUCCESS; CloseRegionProcedure e44dc9a5390c0a1e71cd37627eb0e45a, server=66d575e28c61,35281,1734226422799 in 186 msec 2024-12-15T01:34:53,628 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=76, resume processing ppid=75 2024-12-15T01:34:53,629 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=76, ppid=75, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=e44dc9a5390c0a1e71cd37627eb0e45a, UNASSIGN in 192 msec 2024-12-15T01:34:53,632 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=75, resume processing ppid=74 2024-12-15T01:34:53,632 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=75, ppid=74, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportFileSystemState in 198 msec 2024-12-15T01:34:53,634 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734226493634"}]},"ts":"1734226493634"} 2024-12-15T01:34:53,636 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemState, state=DISABLED in hbase:meta 2024-12-15T01:34:53,644 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemState to state=DISABLED 2024-12-15T01:34:53,646 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=74, state=SUCCESS; DisableTableProcedure table=testtb-testExportFileSystemState in 258 msec 2024-12-15T01:34:53,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=74 2024-12-15T01:34:53,692 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemState, procId: 74 completed 2024-12-15T01:34:53,693 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemState 2024-12-15T01:34:53,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] procedure2.ProcedureExecutor(1098): Stored pid=80, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-15T01:34:53,695 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=80, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-15T01:34:53,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] access.PermissionStorage(259): Removing permissions of removed table 
testtb-testExportFileSystemState 2024-12-15T01:34:53,696 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=80, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-15T01:34:53,698 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35281 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemState 2024-12-15T01:34:53,700 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemState/e44dc9a5390c0a1e71cd37627eb0e45a 2024-12-15T01:34:53,700 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemState/25307496659a6b1dcf2983063379568b 2024-12-15T01:34:53,702 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemState/25307496659a6b1dcf2983063379568b/cf, FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemState/25307496659a6b1dcf2983063379568b/recovered.edits] 2024-12-15T01:34:53,702 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemState/e44dc9a5390c0a1e71cd37627eb0e45a/cf, FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemState/e44dc9a5390c0a1e71cd37627eb0e45a/recovered.edits] 2024-12-15T01:34:53,709 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemState/e44dc9a5390c0a1e71cd37627eb0e45a/cf/c834788d195f4520a0de52b09d590d04 to hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/archive/data/default/testtb-testExportFileSystemState/e44dc9a5390c0a1e71cd37627eb0e45a/cf/c834788d195f4520a0de52b09d590d04 2024-12-15T01:34:53,710 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemState/25307496659a6b1dcf2983063379568b/cf/5bdb6afebe4b41e4ac055485d411259b to hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/archive/data/default/testtb-testExportFileSystemState/25307496659a6b1dcf2983063379568b/cf/5bdb6afebe4b41e4ac055485d411259b 2024-12-15T01:34:53,714 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42663-0x1002752410c0001, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-15T01:34:53,714 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39381-0x1002752410c0003, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-15T01:34:53,714 DEBUG 
[Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-15T01:34:53,715 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35281-0x1002752410c0002, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-15T01:34:53,716 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-15T01:34:53,716 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-15T01:34:53,716 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-15T01:34:53,716 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-15T01:34:53,719 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemState/25307496659a6b1dcf2983063379568b/recovered.edits/9.seqid to hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/archive/data/default/testtb-testExportFileSystemState/25307496659a6b1dcf2983063379568b/recovered.edits/9.seqid 2024-12-15T01:34:53,719 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemState/e44dc9a5390c0a1e71cd37627eb0e45a/recovered.edits/9.seqid to hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/archive/data/default/testtb-testExportFileSystemState/e44dc9a5390c0a1e71cd37627eb0e45a/recovered.edits/9.seqid 2024-12-15T01:34:53,721 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemState/e44dc9a5390c0a1e71cd37627eb0e45a 2024-12-15T01:34:53,722 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemState/25307496659a6b1dcf2983063379568b 2024-12-15T01:34:53,722 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemState regions 2024-12-15T01:34:53,722 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42663-0x1002752410c0001, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-15T01:34:53,723 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42663-0x1002752410c0001, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:34:53,723 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39381-0x1002752410c0003, quorum=127.0.0.1:64248, 
baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-15T01:34:53,723 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39381-0x1002752410c0003, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:34:53,723 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35281-0x1002752410c0002, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-15T01:34:53,723 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35281-0x1002752410c0002, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:34:53,723 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-15T01:34:53,723 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:34:53,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=80 2024-12-15T01:34:53,726 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=80, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-15T01:34:53,731 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportFileSystemState from hbase:meta 2024-12-15T01:34:53,737 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportFileSystemState' descriptor. 2024-12-15T01:34:53,739 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=80, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-15T01:34:53,739 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportFileSystemState' from region states. 
2024-12-15T01:34:53,739 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState,,1734226475595.e44dc9a5390c0a1e71cd37627eb0e45a.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734226493739"}]},"ts":"9223372036854775807"} 2024-12-15T01:34:53,740 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState,1,1734226475595.25307496659a6b1dcf2983063379568b.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734226493739"}]},"ts":"9223372036854775807"} 2024-12-15T01:34:53,742 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-15T01:34:53,742 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => e44dc9a5390c0a1e71cd37627eb0e45a, NAME => 'testtb-testExportFileSystemState,,1734226475595.e44dc9a5390c0a1e71cd37627eb0e45a.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 25307496659a6b1dcf2983063379568b, NAME => 'testtb-testExportFileSystemState,1,1734226475595.25307496659a6b1dcf2983063379568b.', STARTKEY => '1', ENDKEY => ''}] 2024-12-15T01:34:53,742 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportFileSystemState' as deleted. 2024-12-15T01:34:53,742 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734226493742"}]},"ts":"9223372036854775807"} 2024-12-15T01:34:53,744 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportFileSystemState state from META 2024-12-15T01:34:53,753 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=80, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-15T01:34:53,754 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=80, state=SUCCESS; DeleteTableProcedure table=testtb-testExportFileSystemState in 60 msec 2024-12-15T01:34:53,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=80 2024-12-15T01:34:53,827 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportFileSystemState, procId: 80 completed 2024-12-15T01:34:53,842 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemState" 2024-12-15T01:34:53,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testExportFileSystemState 2024-12-15T01:34:53,849 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemState" 2024-12-15T01:34:53,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportFileSystemState 2024-12-15T01:34:53,874 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportFileSystemState Thread=795 (was 795), OpenFileDescriptor=816 (was 812) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=530 (was 528) - SystemLoadAverage LEAK? 
-, ProcessCount=20 (was 17) - ProcessCount LEAK? -, AvailableMemoryMB=4021 (was 4470) 2024-12-15T01:34:53,874 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=795 is superior to 500 2024-12-15T01:34:53,890 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testConsecutiveExports Thread=795, OpenFileDescriptor=816, MaxFileDescriptor=1048576, SystemLoadAverage=530, ProcessCount=20, AvailableMemoryMB=4020 2024-12-15T01:34:53,890 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=795 is superior to 500 2024-12-15T01:34:53,891 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-15T01:34:53,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] procedure2.ProcedureExecutor(1098): Stored pid=81, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testConsecutiveExports 2024-12-15T01:34:53,893 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=81, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_PRE_OPERATION 2024-12-15T01:34:53,893 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T01:34:53,894 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testConsecutiveExports" procId is: 81 2024-12-15T01:34:53,894 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=81, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-15T01:34:53,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-15T01:34:53,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742014_1190 (size=404) 2024-12-15T01:34:53,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742014_1190 (size=404) 2024-12-15T01:34:53,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742014_1190 (size=404) 2024-12-15T01:34:53,911 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 52046ac89a7bb4b821c94c1ddb257f3c, NAME => 'testtb-testConsecutiveExports,,1734226493891.52046ac89a7bb4b821c94c1ddb257f3c.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b 2024-12-15T01:34:53,912 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => a68e1482fc476ca4ae8ad15463a6ffae, NAME => 'testtb-testConsecutiveExports,1,1734226493891.a68e1482fc476ca4ae8ad15463a6ffae.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b 2024-12-15T01:34:53,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742015_1191 (size=65) 2024-12-15T01:34:53,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742016_1192 (size=65) 2024-12-15T01:34:53,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742015_1191 (size=65) 2024-12-15T01:34:53,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742015_1191 (size=65) 2024-12-15T01:34:53,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742016_1192 (size=65) 2024-12-15T01:34:53,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742016_1192 (size=65) 2024-12-15T01:34:53,923 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testConsecutiveExports,,1734226493891.52046ac89a7bb4b821c94c1ddb257f3c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T01:34:53,923 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1681): Closing 52046ac89a7bb4b821c94c1ddb257f3c, disabling compactions & flushes 2024-12-15T01:34:53,923 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testConsecutiveExports,,1734226493891.52046ac89a7bb4b821c94c1ddb257f3c. 2024-12-15T01:34:53,923 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testConsecutiveExports,,1734226493891.52046ac89a7bb4b821c94c1ddb257f3c. 
2024-12-15T01:34:53,923 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testConsecutiveExports,1,1734226493891.a68e1482fc476ca4ae8ad15463a6ffae.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T01:34:53,923 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testConsecutiveExports,,1734226493891.52046ac89a7bb4b821c94c1ddb257f3c. after waiting 0 ms 2024-12-15T01:34:53,923 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testConsecutiveExports,,1734226493891.52046ac89a7bb4b821c94c1ddb257f3c. 2024-12-15T01:34:53,923 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testConsecutiveExports,,1734226493891.52046ac89a7bb4b821c94c1ddb257f3c. 2024-12-15T01:34:53,923 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1681): Closing a68e1482fc476ca4ae8ad15463a6ffae, disabling compactions & flushes 2024-12-15T01:34:53,923 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1635): Region close journal for 52046ac89a7bb4b821c94c1ddb257f3c: 2024-12-15T01:34:53,924 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testConsecutiveExports,1,1734226493891.a68e1482fc476ca4ae8ad15463a6ffae. 2024-12-15T01:34:53,924 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testConsecutiveExports,1,1734226493891.a68e1482fc476ca4ae8ad15463a6ffae. 2024-12-15T01:34:53,924 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testConsecutiveExports,1,1734226493891.a68e1482fc476ca4ae8ad15463a6ffae. after waiting 0 ms 2024-12-15T01:34:53,924 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testConsecutiveExports,1,1734226493891.a68e1482fc476ca4ae8ad15463a6ffae. 2024-12-15T01:34:53,924 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testConsecutiveExports,1,1734226493891.a68e1482fc476ca4ae8ad15463a6ffae. 
2024-12-15T01:34:53,924 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1635): Region close journal for a68e1482fc476ca4ae8ad15463a6ffae: 2024-12-15T01:34:53,925 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=81, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_ADD_TO_META 2024-12-15T01:34:53,925 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testConsecutiveExports,,1734226493891.52046ac89a7bb4b821c94c1ddb257f3c.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1734226493925"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734226493925"}]},"ts":"1734226493925"} 2024-12-15T01:34:53,925 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testConsecutiveExports,1,1734226493891.a68e1482fc476ca4ae8ad15463a6ffae.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1734226493925"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734226493925"}]},"ts":"1734226493925"} 2024-12-15T01:34:53,927 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-15T01:34:53,928 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=81, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-15T01:34:53,928 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734226493928"}]},"ts":"1734226493928"} 2024-12-15T01:34:53,929 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testConsecutiveExports, state=ENABLING in hbase:meta 2024-12-15T01:34:53,948 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(202): Hosts are {66d575e28c61=0} racks are {/default-rack=0} 2024-12-15T01:34:53,949 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-15T01:34:53,949 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-15T01:34:53,949 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-15T01:34:53,949 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-15T01:34:53,949 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-15T01:34:53,949 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-15T01:34:53,949 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-15T01:34:53,949 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=82, ppid=81, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=52046ac89a7bb4b821c94c1ddb257f3c, ASSIGN}, {pid=83, ppid=81, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=a68e1482fc476ca4ae8ad15463a6ffae, ASSIGN}] 2024-12-15T01:34:53,950 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=82, ppid=81, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, 
region=52046ac89a7bb4b821c94c1ddb257f3c, ASSIGN 2024-12-15T01:34:53,950 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=83, ppid=81, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=a68e1482fc476ca4ae8ad15463a6ffae, ASSIGN 2024-12-15T01:34:53,954 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=82, ppid=81, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=52046ac89a7bb4b821c94c1ddb257f3c, ASSIGN; state=OFFLINE, location=66d575e28c61,42663,1734226422715; forceNewPlan=false, retain=false 2024-12-15T01:34:53,954 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=83, ppid=81, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=a68e1482fc476ca4ae8ad15463a6ffae, ASSIGN; state=OFFLINE, location=66d575e28c61,39381,1734226422872; forceNewPlan=false, retain=false 2024-12-15T01:34:53,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-15T01:34:54,104 INFO [66d575e28c61:40789 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-15T01:34:54,104 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=82 updating hbase:meta row=52046ac89a7bb4b821c94c1ddb257f3c, regionState=OPENING, regionLocation=66d575e28c61,42663,1734226422715 2024-12-15T01:34:54,104 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=83 updating hbase:meta row=a68e1482fc476ca4ae8ad15463a6ffae, regionState=OPENING, regionLocation=66d575e28c61,39381,1734226422872 2024-12-15T01:34:54,106 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=84, ppid=82, state=RUNNABLE; OpenRegionProcedure 52046ac89a7bb4b821c94c1ddb257f3c, server=66d575e28c61,42663,1734226422715}] 2024-12-15T01:34:54,107 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=85, ppid=83, state=RUNNABLE; OpenRegionProcedure a68e1482fc476ca4ae8ad15463a6ffae, server=66d575e28c61,39381,1734226422872}] 2024-12-15T01:34:54,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-15T01:34:54,258 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 66d575e28c61,42663,1734226422715 2024-12-15T01:34:54,259 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 66d575e28c61,39381,1734226422872 2024-12-15T01:34:54,261 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] handler.AssignRegionHandler(135): Open testtb-testConsecutiveExports,,1734226493891.52046ac89a7bb4b821c94c1ddb257f3c. 
2024-12-15T01:34:54,261 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(7285): Opening region: {ENCODED => 52046ac89a7bb4b821c94c1ddb257f3c, NAME => 'testtb-testConsecutiveExports,,1734226493891.52046ac89a7bb4b821c94c1ddb257f3c.', STARTKEY => '', ENDKEY => '1'} 2024-12-15T01:34:54,262 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] handler.AssignRegionHandler(135): Open testtb-testConsecutiveExports,1,1734226493891.a68e1482fc476ca4ae8ad15463a6ffae. 2024-12-15T01:34:54,262 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testConsecutiveExports,,1734226493891.52046ac89a7bb4b821c94c1ddb257f3c. service=AccessControlService 2024-12-15T01:34:54,262 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(7285): Opening region: {ENCODED => a68e1482fc476ca4ae8ad15463a6ffae, NAME => 'testtb-testConsecutiveExports,1,1734226493891.a68e1482fc476ca4ae8ad15463a6ffae.', STARTKEY => '1', ENDKEY => ''} 2024-12-15T01:34:54,262 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-15T01:34:54,262 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testConsecutiveExports 52046ac89a7bb4b821c94c1ddb257f3c 2024-12-15T01:34:54,262 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(894): Instantiated testtb-testConsecutiveExports,,1734226493891.52046ac89a7bb4b821c94c1ddb257f3c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T01:34:54,262 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testConsecutiveExports,1,1734226493891.a68e1482fc476ca4ae8ad15463a6ffae. service=AccessControlService 2024-12-15T01:34:54,262 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(7327): checking encryption for 52046ac89a7bb4b821c94c1ddb257f3c 2024-12-15T01:34:54,262 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(7330): checking classloading for 52046ac89a7bb4b821c94c1ddb257f3c 2024-12-15T01:34:54,262 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-15T01:34:54,262 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testConsecutiveExports a68e1482fc476ca4ae8ad15463a6ffae 2024-12-15T01:34:54,262 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(894): Instantiated testtb-testConsecutiveExports,1,1734226493891.a68e1482fc476ca4ae8ad15463a6ffae.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T01:34:54,262 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(7327): checking encryption for a68e1482fc476ca4ae8ad15463a6ffae 2024-12-15T01:34:54,263 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(7330): checking classloading for a68e1482fc476ca4ae8ad15463a6ffae 2024-12-15T01:34:54,263 INFO [StoreOpener-52046ac89a7bb4b821c94c1ddb257f3c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 52046ac89a7bb4b821c94c1ddb257f3c 2024-12-15T01:34:54,263 INFO [StoreOpener-a68e1482fc476ca4ae8ad15463a6ffae-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region a68e1482fc476ca4ae8ad15463a6ffae 2024-12-15T01:34:54,264 INFO [StoreOpener-a68e1482fc476ca4ae8ad15463a6ffae-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a68e1482fc476ca4ae8ad15463a6ffae columnFamilyName cf 2024-12-15T01:34:54,264 INFO [StoreOpener-52046ac89a7bb4b821c94c1ddb257f3c-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 52046ac89a7bb4b821c94c1ddb257f3c columnFamilyName cf 2024-12-15T01:34:54,265 DEBUG [StoreOpener-a68e1482fc476ca4ae8ad15463a6ffae-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T01:34:54,265 DEBUG [StoreOpener-52046ac89a7bb4b821c94c1ddb257f3c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T01:34:54,265 INFO [StoreOpener-52046ac89a7bb4b821c94c1ddb257f3c-1 {}] regionserver.HStore(327): Store=52046ac89a7bb4b821c94c1ddb257f3c/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-15T01:34:54,265 INFO [StoreOpener-a68e1482fc476ca4ae8ad15463a6ffae-1 {}] regionserver.HStore(327): Store=a68e1482fc476ca4ae8ad15463a6ffae/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-15T01:34:54,266 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testConsecutiveExports/a68e1482fc476ca4ae8ad15463a6ffae 2024-12-15T01:34:54,266 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testConsecutiveExports/52046ac89a7bb4b821c94c1ddb257f3c 2024-12-15T01:34:54,266 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testConsecutiveExports/52046ac89a7bb4b821c94c1ddb257f3c 2024-12-15T01:34:54,266 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testConsecutiveExports/a68e1482fc476ca4ae8ad15463a6ffae 2024-12-15T01:34:54,269 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(1085): writing seq id for a68e1482fc476ca4ae8ad15463a6ffae 2024-12-15T01:34:54,269 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(1085): writing seq id for 52046ac89a7bb4b821c94c1ddb257f3c 2024-12-15T01:34:54,271 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testConsecutiveExports/a68e1482fc476ca4ae8ad15463a6ffae/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-15T01:34:54,271 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testConsecutiveExports/52046ac89a7bb4b821c94c1ddb257f3c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-15T01:34:54,272 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(1102): Opened 
a68e1482fc476ca4ae8ad15463a6ffae; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70066771, jitterRate=0.04407624900341034}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-15T01:34:54,272 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(1102): Opened 52046ac89a7bb4b821c94c1ddb257f3c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65214213, jitterRate=-0.028232499957084656}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-15T01:34:54,272 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(1001): Region open journal for a68e1482fc476ca4ae8ad15463a6ffae: 2024-12-15T01:34:54,272 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(1001): Region open journal for 52046ac89a7bb4b821c94c1ddb257f3c: 2024-12-15T01:34:54,273 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testConsecutiveExports,1,1734226493891.a68e1482fc476ca4ae8ad15463a6ffae., pid=85, masterSystemTime=1734226494259 2024-12-15T01:34:54,273 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testConsecutiveExports,,1734226493891.52046ac89a7bb4b821c94c1ddb257f3c., pid=84, masterSystemTime=1734226494258 2024-12-15T01:34:54,275 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=83 updating hbase:meta row=a68e1482fc476ca4ae8ad15463a6ffae, regionState=OPEN, openSeqNum=2, regionLocation=66d575e28c61,39381,1734226422872 2024-12-15T01:34:54,275 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testConsecutiveExports,1,1734226493891.a68e1482fc476ca4ae8ad15463a6ffae. 2024-12-15T01:34:54,275 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] handler.AssignRegionHandler(164): Opened testtb-testConsecutiveExports,1,1734226493891.a68e1482fc476ca4ae8ad15463a6ffae. 2024-12-15T01:34:54,275 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testConsecutiveExports,,1734226493891.52046ac89a7bb4b821c94c1ddb257f3c. 2024-12-15T01:34:54,275 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] handler.AssignRegionHandler(164): Opened testtb-testConsecutiveExports,,1734226493891.52046ac89a7bb4b821c94c1ddb257f3c. 2024-12-15T01:34:54,276 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=82 updating hbase:meta row=52046ac89a7bb4b821c94c1ddb257f3c, regionState=OPEN, openSeqNum=2, regionLocation=66d575e28c61,42663,1734226422715 2024-12-15T01:34:54,278 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40789 {}] assignment.AssignmentManager(1526): Unable to acquire lock for regionNode state=OPEN, location=66d575e28c61,42663,1734226422715, table=testtb-testConsecutiveExports, region=52046ac89a7bb4b821c94c1ddb257f3c. It is likely that another thread is currently holding the lock. 
To avoid deadlock, skip execution for now. 2024-12-15T01:34:54,280 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=85, resume processing ppid=83 2024-12-15T01:34:54,281 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=84, resume processing ppid=82 2024-12-15T01:34:54,281 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=84, ppid=82, state=SUCCESS; OpenRegionProcedure 52046ac89a7bb4b821c94c1ddb257f3c, server=66d575e28c61,42663,1734226422715 in 171 msec 2024-12-15T01:34:54,281 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=85, ppid=83, state=SUCCESS; OpenRegionProcedure a68e1482fc476ca4ae8ad15463a6ffae, server=66d575e28c61,39381,1734226422872 in 169 msec 2024-12-15T01:34:54,281 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=83, ppid=81, state=SUCCESS; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=a68e1482fc476ca4ae8ad15463a6ffae, ASSIGN in 331 msec 2024-12-15T01:34:54,282 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=82, resume processing ppid=81 2024-12-15T01:34:54,282 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=82, ppid=81, state=SUCCESS; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=52046ac89a7bb4b821c94c1ddb257f3c, ASSIGN in 332 msec 2024-12-15T01:34:54,283 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=81, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-15T01:34:54,283 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734226494283"}]},"ts":"1734226494283"} 2024-12-15T01:34:54,284 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testConsecutiveExports, state=ENABLED in hbase:meta 2024-12-15T01:34:54,298 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=81, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_POST_OPERATION 2024-12-15T01:34:54,299 DEBUG [PEWorker-1 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testConsecutiveExports jenkins: RWXCA 2024-12-15T01:34:54,301 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35281 {}] access.PermissionStorage(611): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-12-15T01:34:54,310 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42663-0x1002752410c0001, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:34:54,310 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35281-0x1002752410c0002, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:34:54,310 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39381-0x1002752410c0003, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:34:54,310 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40789-0x1002752410c0000, 
quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:34:54,319 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-15T01:34:54,319 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-15T01:34:54,319 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-15T01:34:54,319 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-15T01:34:54,320 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=81, state=SUCCESS; CreateTableProcedure table=testtb-testConsecutiveExports in 428 msec 2024-12-15T01:34:54,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-15T01:34:54,499 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testConsecutiveExports, procId: 81 completed 2024-12-15T01:34:54,499 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testConsecutiveExports get assigned. Timeout = 60000ms 2024-12-15T01:34:54,499 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-15T01:34:54,509 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testConsecutiveExports assigned to meta. Checking AM states. 2024-12-15T01:34:54,509 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-15T01:34:54,509 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testConsecutiveExports assigned. 2024-12-15T01:34:54,513 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-15T01:34:54,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734226494513 (current time:1734226494513). 
2024-12-15T01:34:54,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-15T01:34:54,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testConsecutiveExports VERSION not specified, setting to 2 2024-12-15T01:34:54,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-15T01:34:54,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0f194f20 to 127.0.0.1:64248 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@14b8601f 2024-12-15T01:34:54,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@78edd6bf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T01:34:54,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T01:34:54,622 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60062, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T01:34:54,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0f194f20 to 127.0.0.1:64248 2024-12-15T01:34:54,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T01:34:54,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x14f727bd to 127.0.0.1:64248 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6e36a86c 2024-12-15T01:34:54,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@78f8d33b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T01:34:54,642 DEBUG [hconnection-0x400b46c0-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T01:34:54,643 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60070, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T01:34:54,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T01:34:54,645 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33184, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T01:34:54,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper 
connection 0x14f727bd to 127.0.0.1:64248 2024-12-15T01:34:54,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T01:34:54,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] access.PermissionStorage(611): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-12-15T01:34:54,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-15T01:34:54,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] procedure2.ProcedureExecutor(1098): Stored pid=86, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-15T01:34:54,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 86 2024-12-15T01:34:54,649 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-15T01:34:54,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=86 2024-12-15T01:34:54,650 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-15T01:34:54,652 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-15T01:34:54,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742017_1193 (size=161) 2024-12-15T01:34:54,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742017_1193 (size=161) 2024-12-15T01:34:54,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742017_1193 (size=161) 2024-12-15T01:34:54,658 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-15T01:34:54,658 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=87, ppid=86, state=RUNNABLE; SnapshotRegionProcedure 52046ac89a7bb4b821c94c1ddb257f3c}, {pid=88, ppid=86, state=RUNNABLE; SnapshotRegionProcedure a68e1482fc476ca4ae8ad15463a6ffae}] 
2024-12-15T01:34:54,659 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=87, ppid=86, state=RUNNABLE; SnapshotRegionProcedure 52046ac89a7bb4b821c94c1ddb257f3c 2024-12-15T01:34:54,659 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=88, ppid=86, state=RUNNABLE; SnapshotRegionProcedure a68e1482fc476ca4ae8ad15463a6ffae 2024-12-15T01:34:54,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=86 2024-12-15T01:34:54,810 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 66d575e28c61,42663,1734226422715 2024-12-15T01:34:54,810 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 66d575e28c61,39381,1734226422872 2024-12-15T01:34:54,811 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39381 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=88 2024-12-15T01:34:54,811 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42663 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=87 2024-12-15T01:34:54,811 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,1,1734226493891.a68e1482fc476ca4ae8ad15463a6ffae. 2024-12-15T01:34:54,811 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,,1734226493891.52046ac89a7bb4b821c94c1ddb257f3c. 2024-12-15T01:34:54,811 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] regionserver.HRegion(2538): Flush status journal for 52046ac89a7bb4b821c94c1ddb257f3c: 2024-12-15T01:34:54,811 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] regionserver.HRegion(2538): Flush status journal for a68e1482fc476ca4ae8ad15463a6ffae: 2024-12-15T01:34:54,811 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,,1734226493891.52046ac89a7bb4b821c94c1ddb257f3c. for emptySnaptb0-testConsecutiveExports completed. 2024-12-15T01:34:54,811 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,1,1734226493891.a68e1482fc476ca4ae8ad15463a6ffae. for emptySnaptb0-testConsecutiveExports completed. 2024-12-15T01:34:54,811 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,1,1734226493891.a68e1482fc476ca4ae8ad15463a6ffae.' region-info for snapshot=emptySnaptb0-testConsecutiveExports 2024-12-15T01:34:54,811 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,,1734226493891.52046ac89a7bb4b821c94c1ddb257f3c.' 
region-info for snapshot=emptySnaptb0-testConsecutiveExports 2024-12-15T01:34:54,811 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-15T01:34:54,811 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-15T01:34:54,811 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-15T01:34:54,811 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-15T01:34:54,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742018_1194 (size=68) 2024-12-15T01:34:54,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742019_1195 (size=68) 2024-12-15T01:34:54,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742019_1195 (size=68) 2024-12-15T01:34:54,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742019_1195 (size=68) 2024-12-15T01:34:54,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742018_1194 (size=68) 2024-12-15T01:34:54,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742018_1194 (size=68) 2024-12-15T01:34:54,818 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,1,1734226493891.a68e1482fc476ca4ae8ad15463a6ffae. 2024-12-15T01:34:54,819 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,,1734226493891.52046ac89a7bb4b821c94c1ddb257f3c. 
2024-12-15T01:34:54,819 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=88 2024-12-15T01:34:54,819 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=87 2024-12-15T01:34:54,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.HMaster(4106): Remote procedure done, pid=87 2024-12-15T01:34:54,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.HMaster(4106): Remote procedure done, pid=88 2024-12-15T01:34:54,819 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testConsecutiveExports on region 52046ac89a7bb4b821c94c1ddb257f3c 2024-12-15T01:34:54,819 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testConsecutiveExports on region a68e1482fc476ca4ae8ad15463a6ffae 2024-12-15T01:34:54,819 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=88, ppid=86, state=RUNNABLE; SnapshotRegionProcedure a68e1482fc476ca4ae8ad15463a6ffae 2024-12-15T01:34:54,819 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=87, ppid=86, state=RUNNABLE; SnapshotRegionProcedure 52046ac89a7bb4b821c94c1ddb257f3c 2024-12-15T01:34:54,820 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=87, ppid=86, state=SUCCESS; SnapshotRegionProcedure 52046ac89a7bb4b821c94c1ddb257f3c in 162 msec 2024-12-15T01:34:54,821 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=88, resume processing ppid=86 2024-12-15T01:34:54,821 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=88, ppid=86, state=SUCCESS; SnapshotRegionProcedure a68e1482fc476ca4ae8ad15463a6ffae in 162 msec 2024-12-15T01:34:54,821 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-15T01:34:54,822 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-15T01:34:54,822 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-15T01:34:54,822 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testConsecutiveExports 2024-12-15T01:34:54,823 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under 
directory:hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/.tmp/emptySnaptb0-testConsecutiveExports 2024-12-15T01:34:54,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742020_1196 (size=543) 2024-12-15T01:34:54,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742020_1196 (size=543) 2024-12-15T01:34:54,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742020_1196 (size=543) 2024-12-15T01:34:54,840 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-15T01:34:54,845 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-15T01:34:54,845 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/.tmp/emptySnaptb0-testConsecutiveExports to hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/emptySnaptb0-testConsecutiveExports 2024-12-15T01:34:54,847 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-15T01:34:54,847 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 86 2024-12-15T01:34:54,848 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=86, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } in 200 msec 2024-12-15T01:34:54,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=86 2024-12-15T01:34:54,952 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testConsecutiveExports, procId: 86 completed 2024-12-15T01:34:54,960 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42663 {}] regionserver.HRegion(8254): writing data to region testtb-testConsecutiveExports,,1734226493891.52046ac89a7bb4b821c94c1ddb257f3c. with WAL disabled. Data may be lost in the event of a crash. 2024-12-15T01:34:54,961 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] regionserver.HRegion(8254): writing data to region testtb-testConsecutiveExports,1,1734226493891.a68e1482fc476ca4ae8ad15463a6ffae. with WAL disabled. 
Data may be lost in the event of a crash. 2024-12-15T01:34:54,965 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testConsecutiveExports 2024-12-15T01:34:54,965 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testConsecutiveExports,,1734226493891.52046ac89a7bb4b821c94c1ddb257f3c. 2024-12-15T01:34:54,965 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-15T01:34:54,979 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-15T01:34:54,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734226494979 (current time:1734226494979). 2024-12-15T01:34:54,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-15T01:34:54,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testConsecutiveExports VERSION not specified, setting to 2 2024-12-15T01:34:54,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-15T01:34:54,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x089d8752 to 127.0.0.1:64248 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@12bafdc7 2024-12-15T01:34:55,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1d041df0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T01:34:55,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T01:34:55,051 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60080, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T01:34:55,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x089d8752 to 127.0.0.1:64248 2024-12-15T01:34:55,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T01:34:55,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x58f90d03 to 127.0.0.1:64248 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1d68971e 2024-12-15T01:34:55,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@203ef2de, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, 
maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T01:34:55,270 DEBUG [hconnection-0x49bb382a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T01:34:55,271 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60090, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T01:34:55,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T01:34:55,274 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33188, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T01:34:55,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x58f90d03 to 127.0.0.1:64248 2024-12-15T01:34:55,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T01:34:55,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] access.PermissionStorage(611): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-12-15T01:34:55,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-15T01:34:55,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] procedure2.ProcedureExecutor(1098): Stored pid=89, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-15T01:34:55,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 89 2024-12-15T01:34:55,277 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-15T01:34:55,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-15T01:34:55,278 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-15T01:34:55,280 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-15T01:34:55,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742021_1197 (size=156) 2024-12-15T01:34:55,286 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742021_1197 (size=156) 2024-12-15T01:34:55,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742021_1197 (size=156) 2024-12-15T01:34:55,287 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-15T01:34:55,287 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=90, ppid=89, state=RUNNABLE; SnapshotRegionProcedure 52046ac89a7bb4b821c94c1ddb257f3c}, {pid=91, ppid=89, state=RUNNABLE; SnapshotRegionProcedure a68e1482fc476ca4ae8ad15463a6ffae}] 2024-12-15T01:34:55,288 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=91, ppid=89, state=RUNNABLE; SnapshotRegionProcedure a68e1482fc476ca4ae8ad15463a6ffae 2024-12-15T01:34:55,288 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=90, ppid=89, state=RUNNABLE; SnapshotRegionProcedure 52046ac89a7bb4b821c94c1ddb257f3c 2024-12-15T01:34:55,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-15T01:34:55,442 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 66d575e28c61,42663,1734226422715 2024-12-15T01:34:55,442 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 66d575e28c61,39381,1734226422872 2024-12-15T01:34:55,443 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39381 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=91 2024-12-15T01:34:55,443 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42663 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=90 2024-12-15T01:34:55,443 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,1,1734226493891.a68e1482fc476ca4ae8ad15463a6ffae. 2024-12-15T01:34:55,443 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,,1734226493891.52046ac89a7bb4b821c94c1ddb257f3c. 
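[Illustrative sketch] The earlier "writing data to region ... with WAL disabled. Data may be lost in the event of a crash." warnings correspond to client writes that skip the write-ahead log. The sketch below shows how such a write looks with the public client API; the table name and the cf:q column come from the log, while the row key, value, and class name are made-up placeholders rather than the test's real payload.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutWithoutWal {                        // hypothetical class name
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("testtb-testConsecutiveExports"))) {
      Put put = new Put(Bytes.toBytes("row-0"));    // hypothetical row key
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"),  // 'cf:q' as in the flushed cells above
          Bytes.toBytes("value-0"));                // hypothetical value
      // SKIP_WAL trades durability for write speed; the region server emits the
      // "Data may be lost in the event of a crash" warning seen in this log.
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);
    }
  }
}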
2024-12-15T01:34:55,444 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.HRegion(2837): Flushing 52046ac89a7bb4b821c94c1ddb257f3c 1/1 column families, dataSize=333 B heapSize=976 B 2024-12-15T01:34:55,444 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.HRegion(2837): Flushing a68e1482fc476ca4ae8ad15463a6ffae 1/1 column families, dataSize=2.93 KB heapSize=6.58 KB 2024-12-15T01:34:55,500 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testConsecutiveExports/52046ac89a7bb4b821c94c1ddb257f3c/.tmp/cf/bacce195bbf54715a4ac7222c513f035 is 71, key is 0007a73f70c4502b522baf17e3cb1d1d/cf:q/1734226494960/Put/seqid=0 2024-12-15T01:34:55,500 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testConsecutiveExports/a68e1482fc476ca4ae8ad15463a6ffae/.tmp/cf/4ba75315670a4d828b3b8ada3a778884 is 71, key is 124e4c2f3c2044e4bd5da11826b48bef/cf:q/1734226494961/Put/seqid=0 2024-12-15T01:34:55,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742023_1199 (size=8188) 2024-12-15T01:34:55,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742023_1199 (size=8188) 2024-12-15T01:34:55,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742023_1199 (size=8188) 2024-12-15T01:34:55,507 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.93 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testConsecutiveExports/a68e1482fc476ca4ae8ad15463a6ffae/.tmp/cf/4ba75315670a4d828b3b8ada3a778884 2024-12-15T01:34:55,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742022_1198 (size=5424) 2024-12-15T01:34:55,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742022_1198 (size=5424) 2024-12-15T01:34:55,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742022_1198 (size=5424) 2024-12-15T01:34:55,509 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=333 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testConsecutiveExports/52046ac89a7bb4b821c94c1ddb257f3c/.tmp/cf/bacce195bbf54715a4ac7222c513f035 2024-12-15T01:34:55,514 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testConsecutiveExports/a68e1482fc476ca4ae8ad15463a6ffae/.tmp/cf/4ba75315670a4d828b3b8ada3a778884 as hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testConsecutiveExports/a68e1482fc476ca4ae8ad15463a6ffae/cf/4ba75315670a4d828b3b8ada3a778884 2024-12-15T01:34:55,514 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testConsecutiveExports/52046ac89a7bb4b821c94c1ddb257f3c/.tmp/cf/bacce195bbf54715a4ac7222c513f035 as hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testConsecutiveExports/52046ac89a7bb4b821c94c1ddb257f3c/cf/bacce195bbf54715a4ac7222c513f035 2024-12-15T01:34:55,519 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testConsecutiveExports/a68e1482fc476ca4ae8ad15463a6ffae/cf/4ba75315670a4d828b3b8ada3a778884, entries=45, sequenceid=6, filesize=8.0 K 2024-12-15T01:34:55,519 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testConsecutiveExports/52046ac89a7bb4b821c94c1ddb257f3c/cf/bacce195bbf54715a4ac7222c513f035, entries=5, sequenceid=6, filesize=5.3 K 2024-12-15T01:34:55,520 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.HRegion(3040): Finished flush of dataSize ~333 B/333, heapSize ~960 B/960, currentSize=0 B/0 for 52046ac89a7bb4b821c94c1ddb257f3c in 76ms, sequenceid=6, compaction requested=false 2024-12-15T01:34:55,520 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.HRegion(3040): Finished flush of dataSize ~2.93 KB/3003, heapSize ~6.56 KB/6720, currentSize=0 B/0 for a68e1482fc476ca4ae8ad15463a6ffae in 76ms, sequenceid=6, compaction requested=false 2024-12-15T01:34:55,520 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testConsecutiveExports' 2024-12-15T01:34:55,520 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testConsecutiveExports' 2024-12-15T01:34:55,521 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.HRegion(2538): Flush status journal for a68e1482fc476ca4ae8ad15463a6ffae: 2024-12-15T01:34:55,521 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.HRegion(2538): Flush status journal for 52046ac89a7bb4b821c94c1ddb257f3c: 2024-12-15T01:34:55,521 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.SnapshotRegionCallable(75): Snapshotting region 
testtb-testConsecutiveExports,1,1734226493891.a68e1482fc476ca4ae8ad15463a6ffae. for snaptb0-testConsecutiveExports completed. 2024-12-15T01:34:55,521 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,,1734226493891.52046ac89a7bb4b821c94c1ddb257f3c. for snaptb0-testConsecutiveExports completed. 2024-12-15T01:34:55,521 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,1,1734226493891.a68e1482fc476ca4ae8ad15463a6ffae.' region-info for snapshot=snaptb0-testConsecutiveExports 2024-12-15T01:34:55,521 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,,1734226493891.52046ac89a7bb4b821c94c1ddb257f3c.' region-info for snapshot=snaptb0-testConsecutiveExports 2024-12-15T01:34:55,521 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-15T01:34:55,521 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-15T01:34:55,521 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testConsecutiveExports/a68e1482fc476ca4ae8ad15463a6ffae/cf/4ba75315670a4d828b3b8ada3a778884] hfiles 2024-12-15T01:34:55,521 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testConsecutiveExports/52046ac89a7bb4b821c94c1ddb257f3c/cf/bacce195bbf54715a4ac7222c513f035] hfiles 2024-12-15T01:34:55,521 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testConsecutiveExports/a68e1482fc476ca4ae8ad15463a6ffae/cf/4ba75315670a4d828b3b8ada3a778884 for snapshot=snaptb0-testConsecutiveExports 2024-12-15T01:34:55,521 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testConsecutiveExports/52046ac89a7bb4b821c94c1ddb257f3c/cf/bacce195bbf54715a4ac7222c513f035 for snapshot=snaptb0-testConsecutiveExports 2024-12-15T01:34:55,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742025_1201 (size=107) 2024-12-15T01:34:55,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742025_1201 (size=107) 2024-12-15T01:34:55,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is 
added to blk_1073742025_1201 (size=107) 2024-12-15T01:34:55,533 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,,1734226493891.52046ac89a7bb4b821c94c1ddb257f3c. 2024-12-15T01:34:55,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742024_1200 (size=107) 2024-12-15T01:34:55,533 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=90 2024-12-15T01:34:55,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742024_1200 (size=107) 2024-12-15T01:34:55,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742024_1200 (size=107) 2024-12-15T01:34:55,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.HMaster(4106): Remote procedure done, pid=90 2024-12-15T01:34:55,534 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testConsecutiveExports on region 52046ac89a7bb4b821c94c1ddb257f3c 2024-12-15T01:34:55,535 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=90, ppid=89, state=RUNNABLE; SnapshotRegionProcedure 52046ac89a7bb4b821c94c1ddb257f3c 2024-12-15T01:34:55,535 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,1,1734226493891.a68e1482fc476ca4ae8ad15463a6ffae. 
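[Illustrative sketch] The repeated "Checking to see if procedure is done pid=89" entries are the client polling the master until the snapshot procedure finishes. A rough client-side equivalent of that polling with the public API, assuming the snapshot name from the log, could look like the following; it is an illustration of the check, not the mechanism the test itself uses.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public class WaitForSnapshot {                      // hypothetical class name
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    SnapshotDescription snapshot = new SnapshotDescription("snaptb0-testConsecutiveExports");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Poll until the master reports the snapshot as finished, mirroring the
      // MasterRpcServices "is procedure done" checks recorded in the log.
      while (!admin.isSnapshotFinished(snapshot)) {
        Thread.sleep(200L);
      }
    }
  }
}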
2024-12-15T01:34:55,535 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=91 2024-12-15T01:34:55,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.HMaster(4106): Remote procedure done, pid=91 2024-12-15T01:34:55,535 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testConsecutiveExports on region a68e1482fc476ca4ae8ad15463a6ffae 2024-12-15T01:34:55,535 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=91, ppid=89, state=RUNNABLE; SnapshotRegionProcedure a68e1482fc476ca4ae8ad15463a6ffae 2024-12-15T01:34:55,536 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=90, ppid=89, state=SUCCESS; SnapshotRegionProcedure 52046ac89a7bb4b821c94c1ddb257f3c in 248 msec 2024-12-15T01:34:55,537 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=91, resume processing ppid=89 2024-12-15T01:34:55,537 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=91, ppid=89, state=SUCCESS; SnapshotRegionProcedure a68e1482fc476ca4ae8ad15463a6ffae in 249 msec 2024-12-15T01:34:55,537 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-15T01:34:55,537 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-15T01:34:55,538 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-15T01:34:55,538 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testConsecutiveExports 2024-12-15T01:34:55,538 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-15T01:34:55,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742026_1202 (size=621) 2024-12-15T01:34:55,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742026_1202 (size=621) 2024-12-15T01:34:55,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742026_1202 (size=621) 2024-12-15T01:34:55,552 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports 
table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-15T01:34:55,557 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-15T01:34:55,557 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports to hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-15T01:34:55,560 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-15T01:34:55,560 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 89 2024-12-15T01:34:55,561 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=89, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } in 284 msec 2024-12-15T01:34:55,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-15T01:34:55,580 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testConsecutiveExports, procId: 89 completed 2024-12-15T01:34:55,580 INFO [Time-limited test {}] snapshot.TestExportSnapshot(476): Local export destination path: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/local-export-1734226495580 2024-12-15T01:34:55,580 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/local-export-1734226495580, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/local-export-1734226495580, srcFsUri=hdfs://localhost:33329, srcDir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b 2024-12-15T01:34:55,607 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:33329, inputRoot=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b 2024-12-15T01:34:55,607 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=org.apache.hadoop.fs.LocalFileSystem@24738cd4, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/local-export-1734226495580, skipTmp=false, 
initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/local-export-1734226495580/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-15T01:34:55,609 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 2024-12-15T01:34:55,613 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/snaptb0-testConsecutiveExports to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/local-export-1734226495580/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-15T01:34:55,794 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/hadoop-16294449606396045425.jar 2024-12-15T01:34:55,795 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-15T01:34:55,795 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-15T01:34:55,795 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-15T01:34:56,628 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/hadoop-10802739308866725794.jar 2024-12-15T01:34:56,628 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-15T01:34:56,628 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-15T01:34:56,683 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/hadoop-13240194960959812137.jar 2024-12-15T01:34:56,683 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-15T01:34:56,684 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-15T01:34:56,684 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-15T01:34:56,684 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-15T01:34:56,684 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-15T01:34:56,684 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-15T01:34:56,685 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-15T01:34:56,685 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-15T01:34:56,685 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-15T01:34:56,685 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-15T01:34:56,685 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-15T01:34:56,686 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-15T01:34:56,686 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 
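[Illustrative sketch] The export phase above is the org.apache.hadoop.hbase.snapshot.ExportSnapshot tool: it verifies the source snapshot, copies the snapshot manifest, and then launches a MapReduce job, which is why TableMapReduceUtil is resolving all of these dependency jars. A sketch of driving the tool programmatically follows; the -copy-to value mirrors the log's file:/// local-export target, but the exact destination path and class name here are illustrative assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class RunExportSnapshot {                    // hypothetical class name
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Roughly equivalent command line:
    //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
    //       -snapshot snaptb0-testConsecutiveExports -copy-to file:///tmp/local-export -mappers 1
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testConsecutiveExports",
        "-copy-to", "file:///tmp/local-export",     // hypothetical destination path
        "-mappers", "1"
    });
    System.exit(rc);
  }
}

Exporting to a file:/// URI, as in this test, copies the snapshot manifest and hfiles onto the local filesystem of the node running the job rather than into another HDFS cluster.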
2024-12-15T01:34:56,686 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-15T01:34:56,686 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-15T01:34:56,687 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-15T01:34:56,687 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-15T01:34:56,687 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-15T01:34:56,687 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-15T01:34:56,688 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-15T01:34:56,688 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-15T01:34:56,688 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-15T01:34:56,688 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-15T01:34:56,688 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-15T01:34:56,689 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-15T01:34:56,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742027_1203 (size=451756) 2024-12-15T01:34:56,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742027_1203 (size=451756) 2024-12-15T01:34:56,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742027_1203 (size=451756) 2024-12-15T01:34:56,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742028_1204 (size=127628) 2024-12-15T01:34:56,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742028_1204 (size=127628) 2024-12-15T01:34:56,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742028_1204 (size=127628) 2024-12-15T01:34:56,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742029_1205 (size=2172137) 2024-12-15T01:34:56,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742029_1205 (size=2172137) 2024-12-15T01:34:56,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742029_1205 (size=2172137) 2024-12-15T01:34:56,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742030_1206 (size=213228) 2024-12-15T01:34:56,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742030_1206 (size=213228) 2024-12-15T01:34:56,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742030_1206 (size=213228) 2024-12-15T01:34:56,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742031_1207 (size=1877034) 2024-12-15T01:34:56,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742031_1207 (size=1877034) 2024-12-15T01:34:56,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742031_1207 (size=1877034) 2024-12-15T01:34:56,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742032_1208 (size=533455) 2024-12-15T01:34:56,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742032_1208 (size=533455) 2024-12-15T01:34:56,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742032_1208 (size=533455) 2024-12-15T01:34:56,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742033_1209 
(size=7280644) 2024-12-15T01:34:56,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742033_1209 (size=7280644) 2024-12-15T01:34:56,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742033_1209 (size=7280644) 2024-12-15T01:34:56,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742034_1210 (size=4188619) 2024-12-15T01:34:56,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742034_1210 (size=4188619) 2024-12-15T01:34:56,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742034_1210 (size=4188619) 2024-12-15T01:34:56,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742035_1211 (size=20406) 2024-12-15T01:34:56,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742035_1211 (size=20406) 2024-12-15T01:34:56,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742035_1211 (size=20406) 2024-12-15T01:34:56,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742036_1212 (size=75495) 2024-12-15T01:34:56,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742036_1212 (size=75495) 2024-12-15T01:34:56,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742036_1212 (size=75495) 2024-12-15T01:34:56,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742037_1213 (size=45609) 2024-12-15T01:34:56,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742037_1213 (size=45609) 2024-12-15T01:34:56,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742037_1213 (size=45609) 2024-12-15T01:34:56,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742038_1214 (size=110084) 2024-12-15T01:34:56,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742038_1214 (size=110084) 2024-12-15T01:34:56,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742038_1214 (size=110084) 2024-12-15T01:34:56,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742039_1215 (size=6350909) 2024-12-15T01:34:56,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742039_1215 (size=6350909) 2024-12-15T01:34:56,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to 
blk_1073742039_1215 (size=6350909) 2024-12-15T01:34:56,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742040_1216 (size=1323991) 2024-12-15T01:34:56,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742040_1216 (size=1323991) 2024-12-15T01:34:56,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742040_1216 (size=1323991) 2024-12-15T01:34:56,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742041_1217 (size=23076) 2024-12-15T01:34:56,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742041_1217 (size=23076) 2024-12-15T01:34:56,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742041_1217 (size=23076) 2024-12-15T01:34:56,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742042_1218 (size=126803) 2024-12-15T01:34:56,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742042_1218 (size=126803) 2024-12-15T01:34:56,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742042_1218 (size=126803) 2024-12-15T01:34:56,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742043_1219 (size=322274) 2024-12-15T01:34:56,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742043_1219 (size=322274) 2024-12-15T01:34:56,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742043_1219 (size=322274) 2024-12-15T01:34:56,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742044_1220 (size=1832290) 2024-12-15T01:34:56,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742044_1220 (size=1832290) 2024-12-15T01:34:56,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742044_1220 (size=1832290) 2024-12-15T01:34:56,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742045_1221 (size=30081) 2024-12-15T01:34:56,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742045_1221 (size=30081) 2024-12-15T01:34:56,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742045_1221 (size=30081) 2024-12-15T01:34:56,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742046_1222 (size=53616) 2024-12-15T01:34:56,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is 
added to blk_1073742046_1222 (size=53616) 2024-12-15T01:34:56,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742046_1222 (size=53616) 2024-12-15T01:34:56,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742047_1223 (size=29229) 2024-12-15T01:34:56,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742047_1223 (size=29229) 2024-12-15T01:34:56,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742047_1223 (size=29229) 2024-12-15T01:34:56,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742048_1224 (size=169089) 2024-12-15T01:34:56,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742048_1224 (size=169089) 2024-12-15T01:34:56,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742048_1224 (size=169089) 2024-12-15T01:34:57,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742049_1225 (size=5175431) 2024-12-15T01:34:57,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742049_1225 (size=5175431) 2024-12-15T01:34:57,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742049_1225 (size=5175431) 2024-12-15T01:34:57,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742050_1226 (size=136454) 2024-12-15T01:34:57,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742050_1226 (size=136454) 2024-12-15T01:34:57,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742050_1226 (size=136454) 2024-12-15T01:34:57,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742051_1227 (size=3317408) 2024-12-15T01:34:57,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742051_1227 (size=3317408) 2024-12-15T01:34:57,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742051_1227 (size=3317408) 2024-12-15T01:34:57,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742052_1228 (size=912094) 2024-12-15T01:34:57,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742052_1228 (size=912094) 2024-12-15T01:34:57,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742052_1228 (size=912094) 2024-12-15T01:34:57,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:38403 is added to blk_1073742053_1229 (size=503880) 2024-12-15T01:34:57,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742053_1229 (size=503880) 2024-12-15T01:34:57,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742053_1229 (size=503880) 2024-12-15T01:34:57,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742054_1230 (size=4695811) 2024-12-15T01:34:57,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742054_1230 (size=4695811) 2024-12-15T01:34:57,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742054_1230 (size=4695811) 2024-12-15T01:34:57,091 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-15T01:34:57,094 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testConsecutiveExports' hfile list 2024-12-15T01:34:57,096 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=13.3 K 2024-12-15T01:34:57,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742055_1231 (size=338) 2024-12-15T01:34:57,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742055_1231 (size=338) 2024-12-15T01:34:57,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742055_1231 (size=338) 2024-12-15T01:34:57,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742056_1232 (size=15) 2024-12-15T01:34:57,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742056_1232 (size=15) 2024-12-15T01:34:57,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742056_1232 (size=15) 2024-12-15T01:34:57,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742057_1233 (size=304984) 2024-12-15T01:34:57,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742057_1233 (size=304984) 2024-12-15T01:34:57,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742057_1233 (size=304984) 2024-12-15T01:34:58,326 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-15T01:34:58,326 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-15T01:34:58,330 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734226430009_0003_000001 (auth:SIMPLE) from 127.0.0.1:58544 2024-12-15T01:34:58,339 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/MiniMRCluster_1118476571/yarn-659708074/MiniMRCluster_1118476571-localDir-nm-1_1/usercache/jenkins/appcache/application_1734226430009_0003/container_1734226430009_0003_01_000001/launch_container.sh] 2024-12-15T01:34:58,339 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/MiniMRCluster_1118476571/yarn-659708074/MiniMRCluster_1118476571-localDir-nm-1_1/usercache/jenkins/appcache/application_1734226430009_0003/container_1734226430009_0003_01_000001/container_tokens] 2024-12-15T01:34:58,339 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/MiniMRCluster_1118476571/yarn-659708074/MiniMRCluster_1118476571-localDir-nm-1_1/usercache/jenkins/appcache/application_1734226430009_0003/container_1734226430009_0003_01_000001/sysfs] 2024-12-15T01:34:59,053 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734226430009_0004_000001 (auth:SIMPLE) from 127.0.0.1:42848 2024-12-15T01:34:59,135 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-15T01:35:02,293 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports 2024-12-15T01:35:02,293 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports Metrics about Tables on a single HBase RegionServer 2024-12-15T01:35:02,294 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState 2024-12-15T01:35:04,551 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734226430009_0004_000001 (auth:SIMPLE) from 127.0.0.1:60906 2024-12-15T01:35:04,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742058_1234 (size=350658) 2024-12-15T01:35:04,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742058_1234 (size=350658) 2024-12-15T01:35:04,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742058_1234 (size=350658) 2024-12-15T01:35:06,995 
INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734226430009_0004_000001 (auth:SIMPLE) from 127.0.0.1:56790 2024-12-15T01:35:07,797 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-15T01:35:10,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742059_1235 (size=17447) 2024-12-15T01:35:10,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742059_1235 (size=17447) 2024-12-15T01:35:10,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742059_1235 (size=17447) 2024-12-15T01:35:10,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742060_1236 (size=462) 2024-12-15T01:35:10,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742060_1236 (size=462) 2024-12-15T01:35:10,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742060_1236 (size=462) 2024-12-15T01:35:10,151 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/MiniMRCluster_1118476571/yarn-659708074/MiniMRCluster_1118476571-localDir-nm-0_1/usercache/jenkins/appcache/application_1734226430009_0004/container_1734226430009_0004_01_000002/launch_container.sh] 2024-12-15T01:35:10,151 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/MiniMRCluster_1118476571/yarn-659708074/MiniMRCluster_1118476571-localDir-nm-0_1/usercache/jenkins/appcache/application_1734226430009_0004/container_1734226430009_0004_01_000002/container_tokens] 2024-12-15T01:35:10,151 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/MiniMRCluster_1118476571/yarn-659708074/MiniMRCluster_1118476571-localDir-nm-0_1/usercache/jenkins/appcache/application_1734226430009_0004/container_1734226430009_0004_01_000002/sysfs] 2024-12-15T01:35:10,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742061_1237 (size=17447) 2024-12-15T01:35:10,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742061_1237 (size=17447) 2024-12-15T01:35:10,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742061_1237 (size=17447) 2024-12-15T01:35:10,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:38745 is added to blk_1073742062_1238 (size=350658) 2024-12-15T01:35:10,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742062_1238 (size=350658) 2024-12-15T01:35:10,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742062_1238 (size=350658) 2024-12-15T01:35:10,240 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734226430009_0004_000001 (auth:SIMPLE) from 127.0.0.1:38086 2024-12-15T01:35:10,818 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-15T01:35:11,317 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-15T01:35:11,318 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 2024-12-15T01:35:11,321 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: snaptb0-testConsecutiveExports 2024-12-15T01:35:11,321 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-15T01:35:11,322 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-15T01:35:11,322 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-778768832_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/snaptb0-testConsecutiveExports at hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-15T01:35:11,322 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-15T01:35:11,322 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-15T01:35:11,323 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in org.apache.hadoop.fs.LocalFileSystem@24738cd4 in root file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/local-export-1734226495580/.hbase-snapshot/snaptb0-testConsecutiveExports at file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/local-export-1734226495580/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-15T01:35:11,323 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/local-export-1734226495580/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-15T01:35:11,323 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): 
file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/local-export-1734226495580/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-15T01:35:11,325 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/local-export-1734226495580, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/local-export-1734226495580, srcFsUri=hdfs://localhost:33329, srcDir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b 2024-12-15T01:35:11,367 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:33329, inputRoot=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b 2024-12-15T01:35:11,367 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=org.apache.hadoop.fs.LocalFileSystem@24738cd4, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/local-export-1734226495580, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/local-export-1734226495580/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-15T01:35:11,370 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 2024-12-15T01:35:11,382 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/snaptb0-testConsecutiveExports to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/local-export-1734226495580/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-15T01:35:11,560 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/hadoop-7793627587299912932.jar 2024-12-15T01:35:11,560 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-15T01:35:11,560 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-15T01:35:11,560 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-15T01:35:12,492 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/hadoop-4191043897247207605.jar 2024-12-15T01:35:12,493 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-15T01:35:12,493 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-15T01:35:12,550 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/hadoop-5991768050314661800.jar 2024-12-15T01:35:12,551 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-15T01:35:12,551 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-15T01:35:12,551 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-15T01:35:12,551 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-15T01:35:12,552 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-15T01:35:12,552 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-15T01:35:12,552 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-15T01:35:12,552 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-15T01:35:12,552 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, 
using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-15T01:35:12,553 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-15T01:35:12,553 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-15T01:35:12,553 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-15T01:35:12,553 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-15T01:35:12,553 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-15T01:35:12,553 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-15T01:35:12,554 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-15T01:35:12,554 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-15T01:35:12,554 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-15T01:35:12,554 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-15T01:35:12,554 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-15T01:35:12,555 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class 
org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-15T01:35:12,555 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-15T01:35:12,555 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-15T01:35:12,555 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-15T01:35:12,555 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-15T01:35:12,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742063_1239 (size=127628) 2024-12-15T01:35:12,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742063_1239 (size=127628) 2024-12-15T01:35:12,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742063_1239 (size=127628) 2024-12-15T01:35:12,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742064_1240 (size=2172137) 2024-12-15T01:35:12,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742064_1240 (size=2172137) 2024-12-15T01:35:12,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742064_1240 (size=2172137) 2024-12-15T01:35:12,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742065_1241 (size=213228) 2024-12-15T01:35:12,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742065_1241 (size=213228) 2024-12-15T01:35:12,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742065_1241 (size=213228) 2024-12-15T01:35:12,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742066_1242 (size=1877034) 2024-12-15T01:35:12,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742066_1242 (size=1877034) 2024-12-15T01:35:12,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:38745 is added to blk_1073742066_1242 (size=1877034) 2024-12-15T01:35:12,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742067_1243 (size=912094) 2024-12-15T01:35:12,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742067_1243 (size=912094) 2024-12-15T01:35:12,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742067_1243 (size=912094) 2024-12-15T01:35:12,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742068_1244 (size=533455) 2024-12-15T01:35:12,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742068_1244 (size=533455) 2024-12-15T01:35:12,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742068_1244 (size=533455) 2024-12-15T01:35:12,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742069_1245 (size=7280644) 2024-12-15T01:35:12,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742069_1245 (size=7280644) 2024-12-15T01:35:12,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742069_1245 (size=7280644) 2024-12-15T01:35:12,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742070_1246 (size=4188619) 2024-12-15T01:35:12,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742070_1246 (size=4188619) 2024-12-15T01:35:12,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742070_1246 (size=4188619) 2024-12-15T01:35:12,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742071_1247 (size=20406) 2024-12-15T01:35:12,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742071_1247 (size=20406) 2024-12-15T01:35:12,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742071_1247 (size=20406) 2024-12-15T01:35:12,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742072_1248 (size=75495) 2024-12-15T01:35:12,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742072_1248 (size=75495) 2024-12-15T01:35:12,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742072_1248 (size=75495) 2024-12-15T01:35:12,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742073_1249 (size=45609) 2024-12-15T01:35:12,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742073_1249 (size=45609) 2024-12-15T01:35:12,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742073_1249 (size=45609) 2024-12-15T01:35:12,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742074_1250 (size=110084) 2024-12-15T01:35:12,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742074_1250 (size=110084) 2024-12-15T01:35:12,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742074_1250 (size=110084) 2024-12-15T01:35:12,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742075_1251 (size=1323991) 2024-12-15T01:35:12,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742075_1251 (size=1323991) 2024-12-15T01:35:12,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742075_1251 (size=1323991) 2024-12-15T01:35:12,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742076_1252 (size=23076) 2024-12-15T01:35:12,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742076_1252 (size=23076) 2024-12-15T01:35:12,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742076_1252 (size=23076) 2024-12-15T01:35:12,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742077_1253 (size=451756) 2024-12-15T01:35:12,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742077_1253 (size=451756) 2024-12-15T01:35:12,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742077_1253 (size=451756) 2024-12-15T01:35:12,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742078_1254 (size=126803) 2024-12-15T01:35:12,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742078_1254 (size=126803) 2024-12-15T01:35:12,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742078_1254 (size=126803) 2024-12-15T01:35:12,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742079_1255 (size=322274) 2024-12-15T01:35:12,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742079_1255 (size=322274) 2024-12-15T01:35:12,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742079_1255 (size=322274) 2024-12-15T01:35:12,844 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742080_1256 (size=6350909) 2024-12-15T01:35:12,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742080_1256 (size=6350909) 2024-12-15T01:35:12,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742080_1256 (size=6350909) 2024-12-15T01:35:12,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742081_1257 (size=1832290) 2024-12-15T01:35:12,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742081_1257 (size=1832290) 2024-12-15T01:35:12,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742081_1257 (size=1832290) 2024-12-15T01:35:12,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742082_1258 (size=30081) 2024-12-15T01:35:12,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742082_1258 (size=30081) 2024-12-15T01:35:12,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742082_1258 (size=30081) 2024-12-15T01:35:12,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742083_1259 (size=53616) 2024-12-15T01:35:12,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742083_1259 (size=53616) 2024-12-15T01:35:12,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742083_1259 (size=53616) 2024-12-15T01:35:12,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742084_1260 (size=29229) 2024-12-15T01:35:12,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742084_1260 (size=29229) 2024-12-15T01:35:12,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742084_1260 (size=29229) 2024-12-15T01:35:12,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742085_1261 (size=169089) 2024-12-15T01:35:12,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742085_1261 (size=169089) 2024-12-15T01:35:12,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742085_1261 (size=169089) 2024-12-15T01:35:12,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742086_1262 (size=5175431) 2024-12-15T01:35:12,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742086_1262 (size=5175431) 2024-12-15T01:35:12,900 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742086_1262 (size=5175431) 2024-12-15T01:35:12,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742087_1263 (size=136454) 2024-12-15T01:35:12,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742087_1263 (size=136454) 2024-12-15T01:35:12,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742087_1263 (size=136454) 2024-12-15T01:35:12,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742088_1264 (size=3317408) 2024-12-15T01:35:12,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742088_1264 (size=3317408) 2024-12-15T01:35:12,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742088_1264 (size=3317408) 2024-12-15T01:35:12,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742089_1265 (size=503880) 2024-12-15T01:35:12,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742089_1265 (size=503880) 2024-12-15T01:35:12,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742089_1265 (size=503880) 2024-12-15T01:35:12,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742090_1266 (size=4695811) 2024-12-15T01:35:12,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742090_1266 (size=4695811) 2024-12-15T01:35:12,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742090_1266 (size=4695811) 2024-12-15T01:35:12,949 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
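
The "No job jar file set" warning that closes the preceding entry comes from Hadoop's JobResourceUploader: the test submits its ExportSnapshot MapReduce job without a job jar, so task classes have to be found on the mini-cluster's classpath instead. In ordinary client code the jar is normally derived from a class that lives inside it; a minimal sketch of that setup (the class and job name below are illustrative, not taken from this test):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.Job;

    public class JobJarSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, "snapshot-export-copy");   // job name is illustrative
        // Register the jar containing this class with the job so task JVMs can load user classes;
        // omitting this step is what produces the "No job jar file set" warning seen above.
        job.setJarByClass(JobJarSketch.class);
        // mapper, input and output configuration would follow here
      }
    }
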
2024-12-15T01:35:12,951 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testConsecutiveExports' hfile list 2024-12-15T01:35:12,953 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=13.3 K 2024-12-15T01:35:12,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742091_1267 (size=338) 2024-12-15T01:35:12,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742091_1267 (size=338) 2024-12-15T01:35:12,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742091_1267 (size=338) 2024-12-15T01:35:12,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742092_1268 (size=15) 2024-12-15T01:35:12,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742092_1268 (size=15) 2024-12-15T01:35:12,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742092_1268 (size=15) 2024-12-15T01:35:12,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742093_1269 (size=304978) 2024-12-15T01:35:12,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742093_1269 (size=304978) 2024-12-15T01:35:12,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742093_1269 (size=304978) 2024-12-15T01:35:16,316 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-15T01:35:16,316 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-15T01:35:16,319 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734226430009_0004_000001 (auth:SIMPLE) from 127.0.0.1:38092 2024-12-15T01:35:16,328 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/MiniMRCluster_1118476571/yarn-659708074/MiniMRCluster_1118476571-localDir-nm-0_0/usercache/jenkins/appcache/application_1734226430009_0004/container_1734226430009_0004_01_000001/launch_container.sh] 2024-12-15T01:35:16,328 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/MiniMRCluster_1118476571/yarn-659708074/MiniMRCluster_1118476571-localDir-nm-0_0/usercache/jenkins/appcache/application_1734226430009_0004/container_1734226430009_0004_01_000001/container_tokens] 2024-12-15T01:35:16,328 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/MiniMRCluster_1118476571/yarn-659708074/MiniMRCluster_1118476571-localDir-nm-0_0/usercache/jenkins/appcache/application_1734226430009_0004/container_1734226430009_0004_01_000001/sysfs] 2024-12-15T01:35:16,336 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734226430009_0005_000001 (auth:SIMPLE) from 127.0.0.1:58258 2024-12-15T01:35:21,229 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734226430009_0005_000001 (auth:SIMPLE) from 127.0.0.1:37224 2024-12-15T01:35:21,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742094_1270 (size=350652) 2024-12-15T01:35:21,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742094_1270 (size=350652) 2024-12-15T01:35:21,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742094_1270 (size=350652) 2024-12-15T01:35:23,534 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734226430009_0005_000001 (auth:SIMPLE) from 127.0.0.1:53474 2024-12-15T01:35:27,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742095_1271 (size=16913) 2024-12-15T01:35:27,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742095_1271 (size=16913) 2024-12-15T01:35:27,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742095_1271 (size=16913) 2024-12-15T01:35:27,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742096_1272 (size=462) 
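
The authentication and block-report entries in this stretch belong to the MapReduce job carrying out the follow-up export of 'snaptb0-testConsecutiveExports' to the same local target. For orientation, an export like this is normally driven through the ExportSnapshot tool; a minimal sketch, assuming the tool is launched programmatically via ToolRunner (the destination path is hypothetical):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotLaunchSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // -snapshot names an existing snapshot; -copy-to is the target filesystem root.
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testConsecutiveExports",
            "-copy-to", "file:///tmp/local-export"   // hypothetical target directory
        });
        System.exit(rc);
      }
    }

The same invocation is more commonly issued from the shell as 'hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot -snapshot <name> -copy-to <uri>'.
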
2024-12-15T01:35:27,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742096_1272 (size=462) 2024-12-15T01:35:27,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742096_1272 (size=462) 2024-12-15T01:35:27,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742097_1273 (size=16913) 2024-12-15T01:35:27,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742097_1273 (size=16913) 2024-12-15T01:35:27,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742097_1273 (size=16913) 2024-12-15T01:35:27,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742098_1274 (size=350652) 2024-12-15T01:35:27,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742098_1274 (size=350652) 2024-12-15T01:35:27,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742098_1274 (size=350652) 2024-12-15T01:35:27,330 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734226430009_0005_000001 (auth:SIMPLE) from 127.0.0.1:53478 2024-12-15T01:35:29,247 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-15T01:35:29,248 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 
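
Once the export is finalized, the test lists the exported layout and expects to find .snapshotinfo and data.manifest under the snapshot directory, as the TestExportSnapshot entries that follow show. A minimal sketch of such a listing with the Hadoop FileSystem API (the export root below is hypothetical):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ListExportedSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Hypothetical export root; the test points at its local-export-<timestamp> directory.
        Path root = new Path("file:///tmp/local-export/.hbase-snapshot/snaptb0-testConsecutiveExports");
        FileSystem fs = FileSystem.get(root.toUri(), conf);
        for (FileStatus status : fs.listStatus(root)) {
          // Expect at least .snapshotinfo and data.manifest, as in the log's listing.
          System.out.println(status.getPath());
        }
      }
    }
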
2024-12-15T01:35:29,258 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: snaptb0-testConsecutiveExports 2024-12-15T01:35:29,258 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-15T01:35:29,258 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-15T01:35:29,258 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-778768832_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/snaptb0-testConsecutiveExports at hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-15T01:35:29,262 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-15T01:35:29,262 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-15T01:35:29,262 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in org.apache.hadoop.fs.LocalFileSystem@24738cd4 in root file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/local-export-1734226495580/.hbase-snapshot/snaptb0-testConsecutiveExports at file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/local-export-1734226495580/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-15T01:35:29,263 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/local-export-1734226495580/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-15T01:35:29,263 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/local-export-1734226495580/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-15T01:35:29,285 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testConsecutiveExports 2024-12-15T01:35:29,286 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testConsecutiveExports 2024-12-15T01:35:29,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] procedure2.ProcedureExecutor(1098): Stored pid=92, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testConsecutiveExports 2024-12-15T01:35:29,290 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734226529290"}]},"ts":"1734226529290"} 2024-12-15T01:35:29,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=92 2024-12-15T01:35:29,292 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated 
tableName=testtb-testConsecutiveExports, state=DISABLING in hbase:meta 2024-12-15T01:35:29,330 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testConsecutiveExports to state=DISABLING 2024-12-15T01:35:29,331 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=93, ppid=92, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testConsecutiveExports}] 2024-12-15T01:35:29,332 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=94, ppid=93, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=52046ac89a7bb4b821c94c1ddb257f3c, UNASSIGN}, {pid=95, ppid=93, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=a68e1482fc476ca4ae8ad15463a6ffae, UNASSIGN}] 2024-12-15T01:35:29,332 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=95, ppid=93, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=a68e1482fc476ca4ae8ad15463a6ffae, UNASSIGN 2024-12-15T01:35:29,332 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=94, ppid=93, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=52046ac89a7bb4b821c94c1ddb257f3c, UNASSIGN 2024-12-15T01:35:29,333 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=95 updating hbase:meta row=a68e1482fc476ca4ae8ad15463a6ffae, regionState=CLOSING, regionLocation=66d575e28c61,39381,1734226422872 2024-12-15T01:35:29,333 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=94 updating hbase:meta row=52046ac89a7bb4b821c94c1ddb257f3c, regionState=CLOSING, regionLocation=66d575e28c61,42663,1734226422715 2024-12-15T01:35:29,334 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-15T01:35:29,335 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=96, ppid=95, state=RUNNABLE; CloseRegionProcedure a68e1482fc476ca4ae8ad15463a6ffae, server=66d575e28c61,39381,1734226422872}] 2024-12-15T01:35:29,335 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-15T01:35:29,335 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=97, ppid=94, state=RUNNABLE; CloseRegionProcedure 52046ac89a7bb4b821c94c1ddb257f3c, server=66d575e28c61,42663,1734226422715}] 2024-12-15T01:35:29,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=92 2024-12-15T01:35:29,486 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 66d575e28c61,39381,1734226422872 2024-12-15T01:35:29,487 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] handler.UnassignRegionHandler(124): Close a68e1482fc476ca4ae8ad15463a6ffae 2024-12-15T01:35:29,487 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-15T01:35:29,487 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 
66d575e28c61,42663,1734226422715 2024-12-15T01:35:29,487 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1681): Closing a68e1482fc476ca4ae8ad15463a6ffae, disabling compactions & flushes 2024-12-15T01:35:29,487 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1703): Closing region testtb-testConsecutiveExports,1,1734226493891.a68e1482fc476ca4ae8ad15463a6ffae. 2024-12-15T01:35:29,487 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testConsecutiveExports,1,1734226493891.a68e1482fc476ca4ae8ad15463a6ffae. 2024-12-15T01:35:29,487 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1791): Acquired close lock on testtb-testConsecutiveExports,1,1734226493891.a68e1482fc476ca4ae8ad15463a6ffae. after waiting 0 ms 2024-12-15T01:35:29,487 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1801): Updates disabled for region testtb-testConsecutiveExports,1,1734226493891.a68e1482fc476ca4ae8ad15463a6ffae. 2024-12-15T01:35:29,488 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] handler.UnassignRegionHandler(124): Close 52046ac89a7bb4b821c94c1ddb257f3c 2024-12-15T01:35:29,488 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-15T01:35:29,488 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] regionserver.HRegion(1681): Closing 52046ac89a7bb4b821c94c1ddb257f3c, disabling compactions & flushes 2024-12-15T01:35:29,488 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] regionserver.HRegion(1703): Closing region testtb-testConsecutiveExports,,1734226493891.52046ac89a7bb4b821c94c1ddb257f3c. 2024-12-15T01:35:29,488 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testConsecutiveExports,,1734226493891.52046ac89a7bb4b821c94c1ddb257f3c. 2024-12-15T01:35:29,488 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] regionserver.HRegion(1791): Acquired close lock on testtb-testConsecutiveExports,,1734226493891.52046ac89a7bb4b821c94c1ddb257f3c. after waiting 0 ms 2024-12-15T01:35:29,488 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] regionserver.HRegion(1801): Updates disabled for region testtb-testConsecutiveExports,,1734226493891.52046ac89a7bb4b821c94c1ddb257f3c. 
2024-12-15T01:35:29,493 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testConsecutiveExports/52046ac89a7bb4b821c94c1ddb257f3c/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-15T01:35:29,494 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-15T01:35:29,494 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] regionserver.HRegion(1922): Closed testtb-testConsecutiveExports,,1734226493891.52046ac89a7bb4b821c94c1ddb257f3c. 2024-12-15T01:35:29,494 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] regionserver.HRegion(1635): Region close journal for 52046ac89a7bb4b821c94c1ddb257f3c: 2024-12-15T01:35:29,496 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] handler.UnassignRegionHandler(170): Closed 52046ac89a7bb4b821c94c1ddb257f3c 2024-12-15T01:35:29,496 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testConsecutiveExports/a68e1482fc476ca4ae8ad15463a6ffae/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-15T01:35:29,496 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=94 updating hbase:meta row=52046ac89a7bb4b821c94c1ddb257f3c, regionState=CLOSED 2024-12-15T01:35:29,497 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-15T01:35:29,497 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1922): Closed testtb-testConsecutiveExports,1,1734226493891.a68e1482fc476ca4ae8ad15463a6ffae. 
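Procedures 92 through 97 above are the server side of a single table disable: DisableTableProcedure spawns CloseTableRegionsProcedure, which unassigns and closes each region and records its final seqid under recovered.edits. On the client this whole tree is driven by one blocking Admin call (the "Operation: DISABLE ... procId: 92 completed" line further down). A minimal sketch, assuming an already-configured cluster on the classpath; the class name is illustrative only:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // picks up hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testConsecutiveExports");
      if (admin.isTableEnabled(table)) {
        // Blocks until the DisableTableProcedure and its close/unassign subprocedures finish.
        admin.disableTable(table);
      }
    }
  }
}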
2024-12-15T01:35:29,497 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1635): Region close journal for a68e1482fc476ca4ae8ad15463a6ffae: 2024-12-15T01:35:29,498 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] handler.UnassignRegionHandler(170): Closed a68e1482fc476ca4ae8ad15463a6ffae 2024-12-15T01:35:29,499 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=95 updating hbase:meta row=a68e1482fc476ca4ae8ad15463a6ffae, regionState=CLOSED 2024-12-15T01:35:29,500 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=97, resume processing ppid=94 2024-12-15T01:35:29,500 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=97, ppid=94, state=SUCCESS; CloseRegionProcedure 52046ac89a7bb4b821c94c1ddb257f3c, server=66d575e28c61,42663,1734226422715 in 163 msec 2024-12-15T01:35:29,502 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=94, ppid=93, state=SUCCESS; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=52046ac89a7bb4b821c94c1ddb257f3c, UNASSIGN in 168 msec 2024-12-15T01:35:29,503 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=96, resume processing ppid=95 2024-12-15T01:35:29,503 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=96, ppid=95, state=SUCCESS; CloseRegionProcedure a68e1482fc476ca4ae8ad15463a6ffae, server=66d575e28c61,39381,1734226422872 in 166 msec 2024-12-15T01:35:29,504 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=95, resume processing ppid=93 2024-12-15T01:35:29,504 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=95, ppid=93, state=SUCCESS; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=a68e1482fc476ca4ae8ad15463a6ffae, UNASSIGN in 171 msec 2024-12-15T01:35:29,506 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=93, resume processing ppid=92 2024-12-15T01:35:29,506 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=93, ppid=92, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testConsecutiveExports in 174 msec 2024-12-15T01:35:29,507 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734226529507"}]},"ts":"1734226529507"} 2024-12-15T01:35:29,508 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testConsecutiveExports, state=DISABLED in hbase:meta 2024-12-15T01:35:29,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=92 2024-12-15T01:35:29,763 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testConsecutiveExports to state=DISABLED 2024-12-15T01:35:29,766 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=92, state=SUCCESS; DisableTableProcedure table=testtb-testConsecutiveExports in 478 msec 2024-12-15T01:35:29,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=92 2024-12-15T01:35:29,895 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testConsecutiveExports, procId: 92 completed 2024-12-15T01:35:29,896 INFO 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testConsecutiveExports 2024-12-15T01:35:29,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] procedure2.ProcedureExecutor(1098): Stored pid=98, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-15T01:35:29,899 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=98, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-15T01:35:29,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testConsecutiveExports 2024-12-15T01:35:29,900 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=98, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-15T01:35:29,902 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35281 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testConsecutiveExports 2024-12-15T01:35:29,915 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testConsecutiveExports/52046ac89a7bb4b821c94c1ddb257f3c 2024-12-15T01:35:29,917 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testConsecutiveExports/52046ac89a7bb4b821c94c1ddb257f3c/cf, FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testConsecutiveExports/52046ac89a7bb4b821c94c1ddb257f3c/recovered.edits] 2024-12-15T01:35:29,938 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testConsecutiveExports/a68e1482fc476ca4ae8ad15463a6ffae 2024-12-15T01:35:29,941 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testConsecutiveExports/a68e1482fc476ca4ae8ad15463a6ffae/cf, FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testConsecutiveExports/a68e1482fc476ca4ae8ad15463a6ffae/recovered.edits] 2024-12-15T01:35:29,946 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testConsecutiveExports/52046ac89a7bb4b821c94c1ddb257f3c/cf/bacce195bbf54715a4ac7222c513f035 to hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/archive/data/default/testtb-testConsecutiveExports/52046ac89a7bb4b821c94c1ddb257f3c/cf/bacce195bbf54715a4ac7222c513f035 2024-12-15T01:35:29,951 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testConsecutiveExports/52046ac89a7bb4b821c94c1ddb257f3c/recovered.edits/9.seqid to 
hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/archive/data/default/testtb-testConsecutiveExports/52046ac89a7bb4b821c94c1ddb257f3c/recovered.edits/9.seqid 2024-12-15T01:35:29,952 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testConsecutiveExports/52046ac89a7bb4b821c94c1ddb257f3c 2024-12-15T01:35:29,964 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testConsecutiveExports/a68e1482fc476ca4ae8ad15463a6ffae/cf/4ba75315670a4d828b3b8ada3a778884 to hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/archive/data/default/testtb-testConsecutiveExports/a68e1482fc476ca4ae8ad15463a6ffae/cf/4ba75315670a4d828b3b8ada3a778884 2024-12-15T01:35:29,972 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testConsecutiveExports/a68e1482fc476ca4ae8ad15463a6ffae/recovered.edits/9.seqid to hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/archive/data/default/testtb-testConsecutiveExports/a68e1482fc476ca4ae8ad15463a6ffae/recovered.edits/9.seqid 2024-12-15T01:35:29,973 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testConsecutiveExports/a68e1482fc476ca4ae8ad15463a6ffae 2024-12-15T01:35:29,973 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testtb-testConsecutiveExports regions 2024-12-15T01:35:29,976 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=98, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-15T01:35:29,985 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testConsecutiveExports from hbase:meta 2024-12-15T01:35:29,996 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testConsecutiveExports' descriptor. 2024-12-15T01:35:29,997 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=98, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-15T01:35:29,998 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testConsecutiveExports' from region states. 
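With the table disabled, the DeleteTableProcedure above clears the filesystem layout (HFileArchiver moves each region's cf and recovered.edits directories under archive/data/default/...), removes the region rows from hbase:meta and drops the descriptor. A rough client-side equivalent, including the snapshot deletes that appear a little further down in the log (helper name and structure are illustrative, not the test's code):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

public class DropTableAndSnapshots {
  static void cleanup(Connection conn) throws Exception {
    try (Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testConsecutiveExports");
      if (admin.tableExists(table)) {
        // deleteTable requires the table to be disabled first; it drives the
        // DELETE_TABLE_* states (clear FS layout, remove from meta, drop descriptor).
        admin.deleteTable(table);
      }
      // The later "delete name: ..." RPCs in the log correspond to snapshot deletes.
      admin.deleteSnapshot("emptySnaptb0-testConsecutiveExports");
      admin.deleteSnapshot("snaptb0-testConsecutiveExports");
    }
  }
}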
2024-12-15T01:35:29,998 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports,,1734226493891.52046ac89a7bb4b821c94c1ddb257f3c.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734226529998"}]},"ts":"9223372036854775807"} 2024-12-15T01:35:29,998 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports,1,1734226493891.a68e1482fc476ca4ae8ad15463a6ffae.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734226529998"}]},"ts":"9223372036854775807"} 2024-12-15T01:35:30,007 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-15T01:35:30,007 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 52046ac89a7bb4b821c94c1ddb257f3c, NAME => 'testtb-testConsecutiveExports,,1734226493891.52046ac89a7bb4b821c94c1ddb257f3c.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => a68e1482fc476ca4ae8ad15463a6ffae, NAME => 'testtb-testConsecutiveExports,1,1734226493891.a68e1482fc476ca4ae8ad15463a6ffae.', STARTKEY => '1', ENDKEY => ''}] 2024-12-15T01:35:30,007 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testConsecutiveExports' as deleted. 2024-12-15T01:35:30,007 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734226530007"}]},"ts":"9223372036854775807"} 2024-12-15T01:35:30,009 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testConsecutiveExports state from META 2024-12-15T01:35:30,177 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=98, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-15T01:35:30,178 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=98, state=SUCCESS; DeleteTableProcedure table=testtb-testConsecutiveExports in 281 msec 2024-12-15T01:35:30,354 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42663-0x1002752410c0001, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-15T01:35:30,354 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35281-0x1002752410c0002, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-15T01:35:30,355 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39381-0x1002752410c0003, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-15T01:35:30,355 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-15T01:35:30,355 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-12-15T01:35:30,355 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 
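The ZKWatcher lines here are the master and each region server's permission watcher reacting to the ACL znode of the dropped table: first a NodeDataChanged when PermissionStorage rewrites it, then a NodeDeleted once the entry is removed. For illustration only, a bare ZooKeeper client can observe the same events by setting a watch on that path; the quorum string and znode are copied from the log, the rest is a standalone sketch:

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class AclZNodeWatch {
  public static void main(String[] args) throws Exception {
    // Quorum and znode path as they appear in the log; a real client would read them from config.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:64248", 30_000, event -> {});
    String path = "/hbase/acl/testtb-testConsecutiveExports";
    Watcher watcher = (WatchedEvent event) ->
        System.out.println(event.getType() + " on " + event.getPath());
    // exists() (rather than getData()) tolerates the znode being deleted while we watch it.
    zk.exists(path, watcher);
    Thread.sleep(10_000); // keep the session alive long enough to see the events
    zk.close();
  }
}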
2024-12-15T01:35:30,355 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-12-15T01:35:30,355 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-12-15T01:35:30,456 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-15T01:35:30,456 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35281-0x1002752410c0002, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-15T01:35:30,456 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42663-0x1002752410c0001, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-15T01:35:30,456 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39381-0x1002752410c0003, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-15T01:35:30,456 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42663-0x1002752410c0001, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:35:30,456 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39381-0x1002752410c0003, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:35:30,456 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35281-0x1002752410c0002, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:35:30,456 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:35:30,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-15T01:35:30,457 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testConsecutiveExports, procId: 98 completed 2024-12-15T01:35:30,465 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testConsecutiveExports" 2024-12-15T01:35:30,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testConsecutiveExports 2024-12-15T01:35:30,469 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testConsecutiveExports" 2024-12-15T01:35:30,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] snapshot.SnapshotManager(380): Deleting 
snapshot: snaptb0-testConsecutiveExports 2024-12-15T01:35:30,492 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testConsecutiveExports Thread=800 (was 795) Potentially hanging thread: Thread-3891 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: ApplicationMasterLauncher #5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-13 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (309020234) connection to localhost/127.0.0.1:45325 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: hconnection-0x613351a8-shared-pool-25 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x613351a8-shared-pool-23 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 64037) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x613351a8-shared-pool-22 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-778768832_22 at /127.0.0.1:45464 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-778768832_22 at /127.0.0.1:44990 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native 
Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-14 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-778768832_22 at /127.0.0.1:57926 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45325 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x613351a8-shared-pool-24 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool.commonPool-worker-5 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1768612408_1 at /127.0.0.1:58476 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=814 (was 816), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=573 (was 530) - SystemLoadAverage LEAK? 
-, ProcessCount=17 (was 20), AvailableMemoryMB=3586 (was 4020) 2024-12-15T01:35:30,493 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=800 is superior to 500 2024-12-15T01:35:30,507 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithMergeRegion Thread=800, OpenFileDescriptor=814, MaxFileDescriptor=1048576, SystemLoadAverage=573, ProcessCount=17, AvailableMemoryMB=3585 2024-12-15T01:35:30,507 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=800 is superior to 500 2024-12-15T01:35:30,509 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-15T01:35:30,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] procedure2.ProcedureExecutor(1098): Stored pid=99, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-15T01:35:30,510 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=99, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_PRE_OPERATION 2024-12-15T01:35:30,510 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T01:35:30,510 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithMergeRegion" procId is: 99 2024-12-15T01:35:30,511 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=99, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-15T01:35:30,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-12-15T01:35:30,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742099_1275 (size=422) 2024-12-15T01:35:30,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742099_1275 (size=422) 2024-12-15T01:35:30,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742099_1275 (size=422) 2024-12-15T01:35:30,519 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 7776d3792f58052a078812d95f884c33, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1734226530508.7776d3792f58052a078812d95f884c33.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', 
METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b 2024-12-15T01:35:30,520 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => 9b718fb94e54ecba6d4eaf293307b363, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1734226530508.9b718fb94e54ecba6d4eaf293307b363.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b 2024-12-15T01:35:30,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742100_1276 (size=83) 2024-12-15T01:35:30,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742100_1276 (size=83) 2024-12-15T01:35:30,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742100_1276 (size=83) 2024-12-15T01:35:30,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742101_1277 (size=83) 2024-12-15T01:35:30,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742101_1277 (size=83) 2024-12-15T01:35:30,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742101_1277 (size=83) 2024-12-15T01:35:30,535 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion,1,1734226530508.9b718fb94e54ecba6d4eaf293307b363.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T01:35:30,535 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion,,1734226530508.7776d3792f58052a078812d95f884c33.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T01:35:30,535 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1681): Closing 9b718fb94e54ecba6d4eaf293307b363, disabling compactions & flushes 2024-12-15T01:35:30,535 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1681): Closing 
7776d3792f58052a078812d95f884c33, disabling compactions & flushes 2024-12-15T01:35:30,535 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion,1,1734226530508.9b718fb94e54ecba6d4eaf293307b363. 2024-12-15T01:35:30,535 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion,,1734226530508.7776d3792f58052a078812d95f884c33. 2024-12-15T01:35:30,535 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1734226530508.9b718fb94e54ecba6d4eaf293307b363. 2024-12-15T01:35:30,535 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion,,1734226530508.7776d3792f58052a078812d95f884c33. 2024-12-15T01:35:30,535 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1734226530508.9b718fb94e54ecba6d4eaf293307b363. after waiting 0 ms 2024-12-15T01:35:30,535 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,1,1734226530508.9b718fb94e54ecba6d4eaf293307b363. 2024-12-15T01:35:30,535 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,,1734226530508.7776d3792f58052a078812d95f884c33. after waiting 0 ms 2024-12-15T01:35:30,535 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion,1,1734226530508.9b718fb94e54ecba6d4eaf293307b363. 2024-12-15T01:35:30,535 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,,1734226530508.7776d3792f58052a078812d95f884c33. 2024-12-15T01:35:30,535 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1635): Region close journal for 9b718fb94e54ecba6d4eaf293307b363: 2024-12-15T01:35:30,535 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion,,1734226530508.7776d3792f58052a078812d95f884c33. 
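CreateTableProcedure pid=99 above writes the filesystem layout for two regions split at row key '1' (each is instantiated once, closed again and journaled) before adding them to hbase:meta. The descriptor echoed in the create request maps onto the 2.x builder API roughly as follows; this is a sketch under those assumptions, not the test's actual setup code:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateSplitTable {
  static void create(Admin admin) throws Exception {
    TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
    // One 'cf' family with a single version, matching the attributes in the create request.
    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(table)
        .setRegionReplication(1)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
            .setMaxVersions(1)
            .build());
    // A single split key of '1' yields the two regions seen above: ['', '1') and ['1', '').
    byte[][] splits = new byte[][] { Bytes.toBytes("1") };
    admin.createTable(builder.build(), splits);
  }
}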
2024-12-15T01:35:30,535 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1635): Region close journal for 7776d3792f58052a078812d95f884c33: 2024-12-15T01:35:30,536 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=99, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_ADD_TO_META 2024-12-15T01:35:30,536 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion,1,1734226530508.9b718fb94e54ecba6d4eaf293307b363.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1734226530536"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734226530536"}]},"ts":"1734226530536"} 2024-12-15T01:35:30,536 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion,,1734226530508.7776d3792f58052a078812d95f884c33.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1734226530536"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734226530536"}]},"ts":"1734226530536"} 2024-12-15T01:35:30,539 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-15T01:35:30,539 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=99, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-15T01:35:30,540 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734226530539"}]},"ts":"1734226530539"} 2024-12-15T01:35:30,541 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=ENABLING in hbase:meta 2024-12-15T01:35:30,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-12-15T01:35:30,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-12-15T01:35:30,838 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(202): Hosts are {66d575e28c61=0} racks are {/default-rack=0} 2024-12-15T01:35:30,839 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-15T01:35:30,839 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-15T01:35:30,839 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-15T01:35:30,839 INFO [PEWorker-5 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-15T01:35:30,839 INFO [PEWorker-5 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-15T01:35:30,839 INFO [PEWorker-5 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-15T01:35:30,839 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-15T01:35:30,840 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=100, ppid=99, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, 
region=7776d3792f58052a078812d95f884c33, ASSIGN}, {pid=101, ppid=99, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=9b718fb94e54ecba6d4eaf293307b363, ASSIGN}] 2024-12-15T01:35:30,841 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=100, ppid=99, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=7776d3792f58052a078812d95f884c33, ASSIGN 2024-12-15T01:35:30,841 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=101, ppid=99, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=9b718fb94e54ecba6d4eaf293307b363, ASSIGN 2024-12-15T01:35:30,842 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=100, ppid=99, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=7776d3792f58052a078812d95f884c33, ASSIGN; state=OFFLINE, location=66d575e28c61,42663,1734226422715; forceNewPlan=false, retain=false 2024-12-15T01:35:30,842 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=101, ppid=99, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=9b718fb94e54ecba6d4eaf293307b363, ASSIGN; state=OFFLINE, location=66d575e28c61,35281,1734226422799; forceNewPlan=false, retain=false 2024-12-15T01:35:30,992 INFO [66d575e28c61:40789 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-15T01:35:30,992 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=101 updating hbase:meta row=9b718fb94e54ecba6d4eaf293307b363, regionState=OPENING, regionLocation=66d575e28c61,35281,1734226422799 2024-12-15T01:35:30,992 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=100 updating hbase:meta row=7776d3792f58052a078812d95f884c33, regionState=OPENING, regionLocation=66d575e28c61,42663,1734226422715 2024-12-15T01:35:30,994 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=102, ppid=101, state=RUNNABLE; OpenRegionProcedure 9b718fb94e54ecba6d4eaf293307b363, server=66d575e28c61,35281,1734226422799}] 2024-12-15T01:35:30,995 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=103, ppid=100, state=RUNNABLE; OpenRegionProcedure 7776d3792f58052a078812d95f884c33, server=66d575e28c61,42663,1734226422715}] 2024-12-15T01:35:31,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-12-15T01:35:31,146 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 66d575e28c61,42663,1734226422715 2024-12-15T01:35:31,146 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 66d575e28c61,35281,1734226422799 2024-12-15T01:35:31,149 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemStateWithMergeRegion,1,1734226530508.9b718fb94e54ecba6d4eaf293307b363. 
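Because all three region servers sit on one host and one rack, the balancer's placement above is effectively round-robin, and the two OpenRegionProcedures are dispatched to the servers on ports 35281 and 42663. A client can confirm where the regions actually landed with a RegionLocator; a small sketch assuming an open connection (method name is illustrative):

import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

public class ShowRegionLocations {
  static void print(Connection conn) throws Exception {
    TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
    try (RegionLocator locator = conn.getRegionLocator(table)) {
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        // Encoded region name plus the host,port,startcode of the serving region server.
        System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
      }
    }
  }
}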
2024-12-15T01:35:31,149 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemStateWithMergeRegion,,1734226530508.7776d3792f58052a078812d95f884c33. 2024-12-15T01:35:31,149 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(7285): Opening region: {ENCODED => 9b718fb94e54ecba6d4eaf293307b363, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1734226530508.9b718fb94e54ecba6d4eaf293307b363.', STARTKEY => '1', ENDKEY => ''} 2024-12-15T01:35:31,149 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(7285): Opening region: {ENCODED => 7776d3792f58052a078812d95f884c33, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1734226530508.7776d3792f58052a078812d95f884c33.', STARTKEY => '', ENDKEY => '1'} 2024-12-15T01:35:31,149 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion,1,1734226530508.9b718fb94e54ecba6d4eaf293307b363. service=AccessControlService 2024-12-15T01:35:31,149 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion,,1734226530508.7776d3792f58052a078812d95f884c33. service=AccessControlService 2024-12-15T01:35:31,149 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-15T01:35:31,149 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
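Both region opens register the AccessControlService coprocessor endpoint. That coprocessor is enabled through cluster configuration rather than per-table code; the sketch below shows the standard configuration keys set programmatically. The keys and the AccessController class name are stock HBase settings, but whether this test harness sets them in code or in its hbase-site.xml is an assumption.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class AccessControllerConfSketch {
  public static void main(String[] args) {
    // Illustrative only: how the AccessController coprocessor seen above is usually enabled.
    Configuration conf = HBaseConfiguration.create();
    conf.setBoolean("hbase.security.authorization", true);
    conf.set("hbase.coprocessor.master.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");
    conf.set("hbase.coprocessor.region.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");
    conf.set("hbase.coprocessor.regionserver.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");
    // A real cluster would carry the same keys in hbase-site.xml before startup.
  }
}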
2024-12-15T01:35:31,150 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion 9b718fb94e54ecba6d4eaf293307b363 2024-12-15T01:35:31,150 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion 7776d3792f58052a078812d95f884c33 2024-12-15T01:35:31,150 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion,1,1734226530508.9b718fb94e54ecba6d4eaf293307b363.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T01:35:31,150 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion,,1734226530508.7776d3792f58052a078812d95f884c33.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T01:35:31,150 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(7327): checking encryption for 9b718fb94e54ecba6d4eaf293307b363 2024-12-15T01:35:31,150 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(7327): checking encryption for 7776d3792f58052a078812d95f884c33 2024-12-15T01:35:31,150 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(7330): checking classloading for 9b718fb94e54ecba6d4eaf293307b363 2024-12-15T01:35:31,150 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(7330): checking classloading for 7776d3792f58052a078812d95f884c33 2024-12-15T01:35:31,151 INFO [StoreOpener-7776d3792f58052a078812d95f884c33-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 7776d3792f58052a078812d95f884c33 2024-12-15T01:35:31,151 INFO [StoreOpener-9b718fb94e54ecba6d4eaf293307b363-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 9b718fb94e54ecba6d4eaf293307b363 2024-12-15T01:35:31,152 INFO [StoreOpener-9b718fb94e54ecba6d4eaf293307b363-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9b718fb94e54ecba6d4eaf293307b363 columnFamilyName cf 2024-12-15T01:35:31,152 INFO [StoreOpener-7776d3792f58052a078812d95f884c33-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7776d3792f58052a078812d95f884c33 columnFamilyName cf 2024-12-15T01:35:31,152 DEBUG [StoreOpener-7776d3792f58052a078812d95f884c33-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T01:35:31,152 DEBUG [StoreOpener-9b718fb94e54ecba6d4eaf293307b363-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T01:35:31,153 INFO [StoreOpener-9b718fb94e54ecba6d4eaf293307b363-1 {}] regionserver.HStore(327): Store=9b718fb94e54ecba6d4eaf293307b363/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-15T01:35:31,153 INFO [StoreOpener-7776d3792f58052a078812d95f884c33-1 {}] regionserver.HStore(327): Store=7776d3792f58052a078812d95f884c33/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-15T01:35:31,154 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion/9b718fb94e54ecba6d4eaf293307b363 2024-12-15T01:35:31,154 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion/9b718fb94e54ecba6d4eaf293307b363 2024-12-15T01:35:31,154 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion/7776d3792f58052a078812d95f884c33 2024-12-15T01:35:31,154 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion/7776d3792f58052a078812d95f884c33 2024-12-15T01:35:31,156 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, 
pid=102}] regionserver.HRegion(1085): writing seq id for 9b718fb94e54ecba6d4eaf293307b363 2024-12-15T01:35:31,156 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(1085): writing seq id for 7776d3792f58052a078812d95f884c33 2024-12-15T01:35:31,159 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion/9b718fb94e54ecba6d4eaf293307b363/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-15T01:35:31,159 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion/7776d3792f58052a078812d95f884c33/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-15T01:35:31,160 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(1102): Opened 9b718fb94e54ecba6d4eaf293307b363; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67466080, jitterRate=0.005322933197021484}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-15T01:35:31,160 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(1102): Opened 7776d3792f58052a078812d95f884c33; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63695170, jitterRate=-0.05086800456047058}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-15T01:35:31,160 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(1001): Region open journal for 7776d3792f58052a078812d95f884c33: 2024-12-15T01:35:31,160 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(1001): Region open journal for 9b718fb94e54ecba6d4eaf293307b363: 2024-12-15T01:35:31,161 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion,1,1734226530508.9b718fb94e54ecba6d4eaf293307b363., pid=102, masterSystemTime=1734226531146 2024-12-15T01:35:31,161 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion,,1734226530508.7776d3792f58052a078812d95f884c33., pid=103, masterSystemTime=1734226531146 2024-12-15T01:35:31,162 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion,1,1734226530508.9b718fb94e54ecba6d4eaf293307b363. 2024-12-15T01:35:31,162 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemStateWithMergeRegion,1,1734226530508.9b718fb94e54ecba6d4eaf293307b363. 
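With both regions now open, the test below waits until every region of the table is assigned. A rough client-side equivalent using only the public HBase 2.x API (not HBaseTestingUtility) is sketched here; the polling loop and the printout of encoded region name and server are illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public final class WaitForAssignmentSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      try (Admin admin = conn.getAdmin()) {
        // Poll until every region of the table is open on some region server.
        while (!admin.isTableAvailable(tn)) {
          Thread.sleep(100);
        }
      }
      // List the resulting assignments, e.g. 9b718fb9... -> 66d575e28c61,35281,...
      try (RegionLocator locator = conn.getRegionLocator(tn)) {
        for (HRegionLocation loc : locator.getAllRegionLocations()) {
          System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
        }
      }
    }
  }
}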
2024-12-15T01:35:31,163 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion,,1734226530508.7776d3792f58052a078812d95f884c33. 2024-12-15T01:35:31,163 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=101 updating hbase:meta row=9b718fb94e54ecba6d4eaf293307b363, regionState=OPEN, openSeqNum=2, regionLocation=66d575e28c61,35281,1734226422799 2024-12-15T01:35:31,163 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemStateWithMergeRegion,,1734226530508.7776d3792f58052a078812d95f884c33. 2024-12-15T01:35:31,163 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=100 updating hbase:meta row=7776d3792f58052a078812d95f884c33, regionState=OPEN, openSeqNum=2, regionLocation=66d575e28c61,42663,1734226422715 2024-12-15T01:35:31,166 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=102, resume processing ppid=101 2024-12-15T01:35:31,166 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=103, resume processing ppid=100 2024-12-15T01:35:31,166 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=102, ppid=101, state=SUCCESS; OpenRegionProcedure 9b718fb94e54ecba6d4eaf293307b363, server=66d575e28c61,35281,1734226422799 in 170 msec 2024-12-15T01:35:31,166 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=103, ppid=100, state=SUCCESS; OpenRegionProcedure 7776d3792f58052a078812d95f884c33, server=66d575e28c61,42663,1734226422715 in 170 msec 2024-12-15T01:35:31,166 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=101, ppid=99, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=9b718fb94e54ecba6d4eaf293307b363, ASSIGN in 326 msec 2024-12-15T01:35:31,167 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=100, resume processing ppid=99 2024-12-15T01:35:31,167 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=100, ppid=99, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=7776d3792f58052a078812d95f884c33, ASSIGN in 326 msec 2024-12-15T01:35:31,168 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=99, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-15T01:35:31,168 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734226531168"}]},"ts":"1734226531168"} 2024-12-15T01:35:31,169 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=ENABLED in hbase:meta 2024-12-15T01:35:31,186 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=99, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_POST_OPERATION 2024-12-15T01:35:31,187 DEBUG [PEWorker-1 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportFileSystemStateWithMergeRegion jenkins: RWXCA 
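As its post-create step, the master persists the table owner's RWXCA permission into the hbase:acl table, and the ZKPermissionWatcher entries below propagate it to every server through the /hbase/acl znode. An explicit client-side grant of the same permission set would look roughly like the sketch below, assuming the AccessControlClient helper that ships with the security coprocessor; the test itself relies on the automatic owner grant, so this call is illustrative only.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public final class GrantTablePermissionSketch {
  // AccessControlClient.grant declares "throws Throwable", hence the broad signature here.
  public static void main(String[] args) throws Throwable {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      // Equivalent in effect to the automatic owner entry "jenkins: RWXCA" written above.
      AccessControlClient.grant(conn,
          TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"),
          "jenkins",   // grantee
          null, null,  // all column families and qualifiers
          Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
          Permission.Action.CREATE, Permission.Action.ADMIN);
    }
  }
}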
2024-12-15T01:35:31,188 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35281 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-12-15T01:35:31,196 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39381-0x1002752410c0003, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:35:31,196 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42663-0x1002752410c0001, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:35:31,196 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35281-0x1002752410c0002, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:35:31,196 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:35:31,205 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-15T01:35:31,205 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-15T01:35:31,205 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-15T01:35:31,205 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-15T01:35:31,206 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=99, state=SUCCESS; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 696 msec 2024-12-15T01:35:31,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-12-15T01:35:31,617 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion, procId: 99 completed 2024-12-15T01:35:31,617 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testExportFileSystemStateWithMergeRegion get assigned. 
Timeout = 60000ms 2024-12-15T01:35:31,617 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-15T01:35:31,623 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testExportFileSystemStateWithMergeRegion assigned to meta. Checking AM states. 2024-12-15T01:35:31,623 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-15T01:35:31,623 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testExportFileSystemStateWithMergeRegion assigned. 2024-12-15T01:35:31,627 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-15T01:35:31,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734226531627 (current time:1734226531627). 2024-12-15T01:35:31,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-15T01:35:31,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion VERSION not specified, setting to 2 2024-12-15T01:35:31,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-15T01:35:31,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x30caec60 to 127.0.0.1:64248 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5bc35dce 2024-12-15T01:35:31,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d1a4391, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T01:35:31,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T01:35:31,666 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50698, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T01:35:31,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x30caec60 to 127.0.0.1:64248 2024-12-15T01:35:31,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T01:35:31,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2f036d9e to 127.0.0.1:64248 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6eef789d 2024-12-15T01:35:31,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] 
ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@186b81a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T01:35:31,688 DEBUG [hconnection-0x20629448-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T01:35:31,690 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50710, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T01:35:31,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T01:35:31,693 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39880, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T01:35:31,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2f036d9e to 127.0.0.1:64248 2024-12-15T01:35:31,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T01:35:31,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-12-15T01:35:31,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-15T01:35:31,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] procedure2.ProcedureExecutor(1098): Stored pid=104, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-15T01:35:31,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 104 2024-12-15T01:35:31,699 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-15T01:35:31,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-15T01:35:31,700 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-15T01:35:31,702 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; 
org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-15T01:35:31,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742102_1278 (size=215) 2024-12-15T01:35:31,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742102_1278 (size=215) 2024-12-15T01:35:31,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742102_1278 (size=215) 2024-12-15T01:35:31,718 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-15T01:35:31,718 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=105, ppid=104, state=RUNNABLE; SnapshotRegionProcedure 7776d3792f58052a078812d95f884c33}, {pid=106, ppid=104, state=RUNNABLE; SnapshotRegionProcedure 9b718fb94e54ecba6d4eaf293307b363}] 2024-12-15T01:35:31,718 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=106, ppid=104, state=RUNNABLE; SnapshotRegionProcedure 9b718fb94e54ecba6d4eaf293307b363 2024-12-15T01:35:31,719 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=105, ppid=104, state=RUNNABLE; SnapshotRegionProcedure 7776d3792f58052a078812d95f884c33 2024-12-15T01:35:31,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-15T01:35:31,871 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 66d575e28c61,42663,1734226422715 2024-12-15T01:35:31,871 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 66d575e28c61,35281,1734226422799 2024-12-15T01:35:31,872 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35281 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=106 2024-12-15T01:35:31,872 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42663 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=105 2024-12-15T01:35:31,872 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1734226530508.9b718fb94e54ecba6d4eaf293307b363. 2024-12-15T01:35:31,872 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1734226530508.7776d3792f58052a078812d95f884c33. 
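The whole snapshot procedure running here (pid=104, type=FLUSH) is driven by a single client call to Admin.snapshot. A minimal sketch using the snapshot and table names from the log; the surrounding connection handling is illustrative rather than the test's own code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class TakeSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Triggers the master-side SnapshotProcedure (FLUSH type for an enabled table)
      // and returns once the master reports the snapshot as complete.
      admin.snapshot("emptySnaptb0-testExportFileSystemStateWithMergeRegion",
          TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"));
    }
  }
}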
2024-12-15T01:35:31,872 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegion(2538): Flush status journal for 7776d3792f58052a078812d95f884c33: 2024-12-15T01:35:31,872 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegion(2538): Flush status journal for 9b718fb94e54ecba6d4eaf293307b363: 2024-12-15T01:35:31,873 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,,1734226530508.7776d3792f58052a078812d95f884c33. for emptySnaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-15T01:35:31,873 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,1,1734226530508.9b718fb94e54ecba6d4eaf293307b363. for emptySnaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-15T01:35:31,873 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,1,1734226530508.9b718fb94e54ecba6d4eaf293307b363.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-15T01:35:31,873 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,,1734226530508.7776d3792f58052a078812d95f884c33.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-15T01:35:31,873 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-15T01:35:31,873 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-15T01:35:31,873 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-15T01:35:31,873 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-15T01:35:31,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742103_1279 (size=86) 2024-12-15T01:35:31,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742104_1280 (size=86) 2024-12-15T01:35:31,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742103_1279 (size=86) 2024-12-15T01:35:31,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742103_1279 (size=86) 2024-12-15T01:35:31,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742104_1280 (size=86) 2024-12-15T01:35:31,879 DEBUG 
[RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1734226530508.7776d3792f58052a078812d95f884c33. 2024-12-15T01:35:31,879 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=105 2024-12-15T01:35:31,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742104_1280 (size=86) 2024-12-15T01:35:31,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.HMaster(4106): Remote procedure done, pid=105 2024-12-15T01:35:31,880 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion on region 7776d3792f58052a078812d95f884c33 2024-12-15T01:35:31,880 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=105, ppid=104, state=RUNNABLE; SnapshotRegionProcedure 7776d3792f58052a078812d95f884c33 2024-12-15T01:35:31,880 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1734226530508.9b718fb94e54ecba6d4eaf293307b363. 2024-12-15T01:35:31,880 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=106 2024-12-15T01:35:31,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.HMaster(4106): Remote procedure done, pid=106 2024-12-15T01:35:31,880 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion on region 9b718fb94e54ecba6d4eaf293307b363 2024-12-15T01:35:31,881 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=106, ppid=104, state=RUNNABLE; SnapshotRegionProcedure 9b718fb94e54ecba6d4eaf293307b363 2024-12-15T01:35:31,881 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=105, ppid=104, state=SUCCESS; SnapshotRegionProcedure 7776d3792f58052a078812d95f884c33 in 163 msec 2024-12-15T01:35:31,882 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=106, resume processing ppid=104 2024-12-15T01:35:31,882 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-15T01:35:31,882 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=106, ppid=104, state=SUCCESS; SnapshotRegionProcedure 9b718fb94e54ecba6d4eaf293307b363 in 163 msec 2024-12-15T01:35:31,882 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion 
table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-15T01:35:31,883 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-15T01:35:31,883 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-15T01:35:31,883 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-15T01:35:31,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742105_1281 (size=597) 2024-12-15T01:35:31,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742105_1281 (size=597) 2024-12-15T01:35:31,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742105_1281 (size=597) 2024-12-15T01:35:31,892 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-15T01:35:31,897 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-15T01:35:31,898 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithMergeRegion to hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-15T01:35:31,899 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-15T01:35:31,899 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 104 2024-12-15T01:35:31,900 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=104, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ 
ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } in 202 msec 2024-12-15T01:35:32,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-15T01:35:32,002 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion, procId: 104 completed 2024-12-15T01:35:32,009 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42663 {}] regionserver.HRegion(8254): writing data to region testtb-testExportFileSystemStateWithMergeRegion,,1734226530508.7776d3792f58052a078812d95f884c33. with WAL disabled. Data may be lost in the event of a crash. 2024-12-15T01:35:32,009 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35281 {}] regionserver.HRegion(8254): writing data to region testtb-testExportFileSystemStateWithMergeRegion,1,1734226530508.9b718fb94e54ecba6d4eaf293307b363. with WAL disabled. Data may be lost in the event of a crash. 2024-12-15T01:35:32,013 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportFileSystemStateWithMergeRegion 2024-12-15T01:35:32,013 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportFileSystemStateWithMergeRegion,,1734226530508.7776d3792f58052a078812d95f884c33. 2024-12-15T01:35:32,013 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-15T01:35:32,030 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-15T01:35:32,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734226532030 (current time:1734226532030). 
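The two "WAL disabled" warnings above come from client puts that request skip-WAL durability. A hedged sketch of such a write follows; the table, column family, and qualifier ("cf"/"q") match the log, while the row key and value are invented for illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class SkipWalPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(
             TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"))) {
      Put put = new Put(Bytes.toBytes("row-0"));  // row key invented for illustration
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      put.setDurability(Durability.SKIP_WAL);     // provokes the "WAL disabled" warning above
      table.put(put);
    }
  }
}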
2024-12-15T01:35:32,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-15T01:35:32,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportFileSystemStateWithMergeRegion VERSION not specified, setting to 2 2024-12-15T01:35:32,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-15T01:35:32,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x35f5e59f to 127.0.0.1:64248 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@181f7bd 2024-12-15T01:35:32,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5e758680, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T01:35:32,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T01:35:32,041 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50716, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T01:35:32,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x35f5e59f to 127.0.0.1:64248 2024-12-15T01:35:32,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T01:35:32,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7bd0bf00 to 127.0.0.1:64248 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@756283d8 2024-12-15T01:35:32,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7d14a72, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T01:35:32,071 DEBUG [hconnection-0x7d0583cf-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T01:35:32,073 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50724, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T01:35:32,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T01:35:32,075 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39890, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T01:35:32,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(407): Close 
zookeeper connection 0x7bd0bf00 to 127.0.0.1:64248 2024-12-15T01:35:32,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T01:35:32,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-12-15T01:35:32,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-15T01:35:32,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] procedure2.ProcedureExecutor(1098): Stored pid=107, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-15T01:35:32,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 107 2024-12-15T01:35:32,079 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-15T01:35:32,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=107 2024-12-15T01:35:32,080 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-15T01:35:32,082 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-15T01:35:32,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742106_1282 (size=210) 2024-12-15T01:35:32,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742106_1282 (size=210) 2024-12-15T01:35:32,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742106_1282 (size=210) 2024-12-15T01:35:32,089 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-15T01:35:32,089 INFO [PEWorker-3 {}] 
procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=108, ppid=107, state=RUNNABLE; SnapshotRegionProcedure 7776d3792f58052a078812d95f884c33}, {pid=109, ppid=107, state=RUNNABLE; SnapshotRegionProcedure 9b718fb94e54ecba6d4eaf293307b363}] 2024-12-15T01:35:32,090 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=109, ppid=107, state=RUNNABLE; SnapshotRegionProcedure 9b718fb94e54ecba6d4eaf293307b363 2024-12-15T01:35:32,090 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=108, ppid=107, state=RUNNABLE; SnapshotRegionProcedure 7776d3792f58052a078812d95f884c33 2024-12-15T01:35:32,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=107 2024-12-15T01:35:32,241 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 66d575e28c61,42663,1734226422715 2024-12-15T01:35:32,241 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 66d575e28c61,35281,1734226422799 2024-12-15T01:35:32,242 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42663 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=108 2024-12-15T01:35:32,242 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35281 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=109 2024-12-15T01:35:32,242 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1734226530508.7776d3792f58052a078812d95f884c33. 2024-12-15T01:35:32,242 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1734226530508.9b718fb94e54ecba6d4eaf293307b363. 
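Because this second snapshot is also type=FLUSH and the regions now hold data, each SnapshotRegionCallable first flushes its region's memstore to an HFile (the Flushing/Flushed entries that follow). An equivalent flush can also be requested explicitly from a client, sketched below for completeness; the snapshot procedure performs it server-side on its own, so this call is not part of the test flow.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Forces every region of the table to flush its memstore to an HFile,
      // which is what the FLUSH-type snapshot does per region below.
      admin.flush(TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"));
    }
  }
}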
2024-12-15T01:35:32,242 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.HRegion(2837): Flushing 7776d3792f58052a078812d95f884c33 1/1 column families, dataSize=199 B heapSize=688 B 2024-12-15T01:35:32,242 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.HRegion(2837): Flushing 9b718fb94e54ecba6d4eaf293307b363 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-12-15T01:35:32,256 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion/7776d3792f58052a078812d95f884c33/.tmp/cf/8a12fe24ec2a4ec88d2b5831f19c8271 is 71, key is 05981c15df158ade887c3d06a02b7982/cf:q/1734226532009/Put/seqid=0 2024-12-15T01:35:32,262 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion/9b718fb94e54ecba6d4eaf293307b363/.tmp/cf/e1fca6fda98e40f58bfe399a639e55e8 is 71, key is 17435bf112f729a308e62203262fabbb/cf:q/1734226532009/Put/seqid=0 2024-12-15T01:35:32,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742107_1283 (size=5286) 2024-12-15T01:35:32,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742107_1283 (size=5286) 2024-12-15T01:35:32,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742107_1283 (size=5286) 2024-12-15T01:35:32,263 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=199 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion/7776d3792f58052a078812d95f884c33/.tmp/cf/8a12fe24ec2a4ec88d2b5831f19c8271 2024-12-15T01:35:32,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742108_1284 (size=8324) 2024-12-15T01:35:32,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742108_1284 (size=8324) 2024-12-15T01:35:32,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742108_1284 (size=8324) 2024-12-15T01:35:32,268 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.06 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion/9b718fb94e54ecba6d4eaf293307b363/.tmp/cf/e1fca6fda98e40f58bfe399a639e55e8 2024-12-15T01:35:32,268 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion/7776d3792f58052a078812d95f884c33/.tmp/cf/8a12fe24ec2a4ec88d2b5831f19c8271 as hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion/7776d3792f58052a078812d95f884c33/cf/8a12fe24ec2a4ec88d2b5831f19c8271 2024-12-15T01:35:32,272 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion/7776d3792f58052a078812d95f884c33/cf/8a12fe24ec2a4ec88d2b5831f19c8271, entries=3, sequenceid=6, filesize=5.2 K 2024-12-15T01:35:32,272 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion/9b718fb94e54ecba6d4eaf293307b363/.tmp/cf/e1fca6fda98e40f58bfe399a639e55e8 as hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion/9b718fb94e54ecba6d4eaf293307b363/cf/e1fca6fda98e40f58bfe399a639e55e8 2024-12-15T01:35:32,272 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.HRegion(3040): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for 7776d3792f58052a078812d95f884c33 in 30ms, sequenceid=6, compaction requested=false 2024-12-15T01:35:32,272 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion' 2024-12-15T01:35:32,273 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.HRegion(2538): Flush status journal for 7776d3792f58052a078812d95f884c33: 2024-12-15T01:35:32,273 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,,1734226530508.7776d3792f58052a078812d95f884c33. for snaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-15T01:35:32,273 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,,1734226530508.7776d3792f58052a078812d95f884c33.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-15T01:35:32,273 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-15T01:35:32,273 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion/7776d3792f58052a078812d95f884c33/cf/8a12fe24ec2a4ec88d2b5831f19c8271] hfiles 2024-12-15T01:35:32,273 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion/7776d3792f58052a078812d95f884c33/cf/8a12fe24ec2a4ec88d2b5831f19c8271 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-15T01:35:32,277 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion/9b718fb94e54ecba6d4eaf293307b363/cf/e1fca6fda98e40f58bfe399a639e55e8, entries=47, sequenceid=6, filesize=8.1 K 2024-12-15T01:35:32,278 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.HRegion(3040): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for 9b718fb94e54ecba6d4eaf293307b363 in 35ms, sequenceid=6, compaction requested=false 2024-12-15T01:35:32,278 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.HRegion(2538): Flush status journal for 9b718fb94e54ecba6d4eaf293307b363: 2024-12-15T01:35:32,278 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,1,1734226530508.9b718fb94e54ecba6d4eaf293307b363. for snaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-15T01:35:32,278 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,1,1734226530508.9b718fb94e54ecba6d4eaf293307b363.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-15T01:35:32,278 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-15T01:35:32,278 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion/9b718fb94e54ecba6d4eaf293307b363/cf/e1fca6fda98e40f58bfe399a639e55e8] hfiles 2024-12-15T01:35:32,278 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion/9b718fb94e54ecba6d4eaf293307b363/cf/e1fca6fda98e40f58bfe399a639e55e8 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-15T01:35:32,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742109_1285 (size=125) 2024-12-15T01:35:32,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742109_1285 (size=125) 2024-12-15T01:35:32,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742109_1285 (size=125) 2024-12-15T01:35:32,280 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1734226530508.7776d3792f58052a078812d95f884c33. 
2024-12-15T01:35:32,280 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=108 2024-12-15T01:35:32,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.HMaster(4106): Remote procedure done, pid=108 2024-12-15T01:35:32,281 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion on region 7776d3792f58052a078812d95f884c33 2024-12-15T01:35:32,281 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=108, ppid=107, state=RUNNABLE; SnapshotRegionProcedure 7776d3792f58052a078812d95f884c33 2024-12-15T01:35:32,282 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=108, ppid=107, state=SUCCESS; SnapshotRegionProcedure 7776d3792f58052a078812d95f884c33 in 193 msec 2024-12-15T01:35:32,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742110_1286 (size=125) 2024-12-15T01:35:32,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742110_1286 (size=125) 2024-12-15T01:35:32,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742110_1286 (size=125) 2024-12-15T01:35:32,288 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1734226530508.9b718fb94e54ecba6d4eaf293307b363. 
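The surrounding entries trace the region-level subtasks (pids 108 and 109) of the FLUSH-type snapshot procedure pid=107: each region flushes its memstore to an HFile, then records its region-info and HFile references in the snapshot manifest. For orientation only, below is a minimal client-side sketch of requesting such a snapshot through the standard HBase 2.x Admin API; the table and snapshot names mirror the ones in this log, while the class name (SnapshotSketch) and the bare-bones setup are hypothetical and not taken from the test that produced this output.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class SnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();   // picks up hbase-site.xml from the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // FLUSH-type snapshot: memstores are flushed first, matching the
          // "Flushing ... column families" entries for each region above.
          admin.snapshot("snaptb0-testExportFileSystemStateWithMergeRegion",
              TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"),
              SnapshotType.FLUSH);
        }
      }
    }

The synchronous snapshot call returns once the master-side SnapshotProcedure (pid=107 here) reaches SUCCESS, which is what the "Operation: SNAPSHOT ... procId: 107 completed" entry further down reports.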
2024-12-15T01:35:32,288 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=109 2024-12-15T01:35:32,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.HMaster(4106): Remote procedure done, pid=109 2024-12-15T01:35:32,289 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion on region 9b718fb94e54ecba6d4eaf293307b363 2024-12-15T01:35:32,289 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=109, ppid=107, state=RUNNABLE; SnapshotRegionProcedure 9b718fb94e54ecba6d4eaf293307b363 2024-12-15T01:35:32,290 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=109, resume processing ppid=107 2024-12-15T01:35:32,290 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=109, ppid=107, state=SUCCESS; SnapshotRegionProcedure 9b718fb94e54ecba6d4eaf293307b363 in 200 msec 2024-12-15T01:35:32,290 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-15T01:35:32,291 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-15T01:35:32,291 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-15T01:35:32,291 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-15T01:35:32,292 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-15T01:35:32,293 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion 2024-12-15T01:35:32,293 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion Metrics about Tables on a single HBase RegionServer 2024-12-15T01:35:32,293 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports 2024-12-15T01:35:32,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742111_1287 (size=675) 2024-12-15T01:35:32,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742111_1287 (size=675) 2024-12-15T01:35:32,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742111_1287 (size=675) 2024-12-15T01:35:32,301 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-15T01:35:32,306 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-15T01:35:32,306 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion to hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-15T01:35:32,307 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-15T01:35:32,307 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 107 2024-12-15T01:35:32,308 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=107, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } in 230 msec 2024-12-15T01:35:32,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=107 2024-12-15T01:35:32,381 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion, procId: 107 completed 2024-12-15T01:35:32,399 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-15T01:35:32,401 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39898, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-15T01:35:32,402 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35281 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-12-15T01:35:32,415 DEBUG 
[Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-15T01:35:32,417 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54860, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-15T01:35:32,417 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-12-15T01:35:32,417 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-15T01:35:32,419 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50734, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-15T01:35:32,419 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42663 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-12-15T01:35:32,421 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithMergeRegion-1', {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-15T01:35:32,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] procedure2.ProcedureExecutor(1098): Stored pid=110, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-15T01:35:32,422 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=110, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_PRE_OPERATION 2024-12-15T01:35:32,423 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T01:35:32,423 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithMergeRegion-1" procId is: 110 2024-12-15T01:35:32,423 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=110, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-15T01:35:32,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-15T01:35:32,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742112_1288 (size=399) 2024-12-15T01:35:32,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742112_1288 (size=399) 2024-12-15T01:35:32,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742112_1288 (size=399) 
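The create-table request above (pid=110) defines a single 'cf' family with VERSIONS => '1' and otherwise default attributes, and the RegionOpenAndInit entries that follow show the table laid out as two regions split at row key '2'. A hedged sketch of issuing an equivalent request with the HBase 2.x descriptor builders is shown below; the split key and attribute values come from the log, while the class name and the minimal setup are assumptions for illustration.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableDescriptor desc = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                  .setMaxVersions(1)               // VERSIONS => '1' in the log
                  .build())
              .build();
          // A single explicit split key ("2") yields the two regions ['', '2') and ['2', '') created below.
          admin.createTable(desc, new byte[][] { Bytes.toBytes("2") });
        }
      }
    }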
2024-12-15T01:35:32,434 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => d17accb6d8d6e80a75d9033cf5bb5991, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,2,1734226532420.d17accb6d8d6e80a75d9033cf5bb5991.', STARTKEY => '2', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion-1', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b 2024-12-15T01:35:32,434 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => bf7c67a04da076d05e76ef76f915c1e2, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1734226532420.bf7c67a04da076d05e76ef76f915c1e2.', STARTKEY => '', ENDKEY => '2'}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion-1', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b 2024-12-15T01:35:32,435 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/MiniMRCluster_1118476571/yarn-659708074/MiniMRCluster_1118476571-localDir-nm-0_0/usercache/jenkins/appcache/application_1734226430009_0005/container_1734226430009_0005_01_000002/launch_container.sh] 2024-12-15T01:35:32,435 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/MiniMRCluster_1118476571/yarn-659708074/MiniMRCluster_1118476571-localDir-nm-0_0/usercache/jenkins/appcache/application_1734226430009_0005/container_1734226430009_0005_01_000002/container_tokens] 2024-12-15T01:35:32,435 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/MiniMRCluster_1118476571/yarn-659708074/MiniMRCluster_1118476571-localDir-nm-0_0/usercache/jenkins/appcache/application_1734226430009_0005/container_1734226430009_0005_01_000002/sysfs] 2024-12-15T01:35:32,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742114_1290 (size=85) 2024-12-15T01:35:32,440 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742113_1289 (size=85) 2024-12-15T01:35:32,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742114_1290 (size=85) 2024-12-15T01:35:32,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742113_1289 (size=85) 2024-12-15T01:35:32,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742114_1290 (size=85) 2024-12-15T01:35:32,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742113_1289 (size=85) 2024-12-15T01:35:32,441 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1734226532420.bf7c67a04da076d05e76ef76f915c1e2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T01:35:32,441 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,2,1734226532420.d17accb6d8d6e80a75d9033cf5bb5991.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T01:35:32,441 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1681): Closing d17accb6d8d6e80a75d9033cf5bb5991, disabling compactions & flushes 2024-12-15T01:35:32,441 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1681): Closing bf7c67a04da076d05e76ef76f915c1e2, disabling compactions & flushes 2024-12-15T01:35:32,441 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1734226532420.bf7c67a04da076d05e76ef76f915c1e2. 2024-12-15T01:35:32,441 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,2,1734226532420.d17accb6d8d6e80a75d9033cf5bb5991. 2024-12-15T01:35:32,441 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1734226532420.bf7c67a04da076d05e76ef76f915c1e2. 2024-12-15T01:35:32,441 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1734226532420.d17accb6d8d6e80a75d9033cf5bb5991. 2024-12-15T01:35:32,441 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1734226532420.bf7c67a04da076d05e76ef76f915c1e2. 
after waiting 0 ms 2024-12-15T01:35:32,441 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1734226532420.d17accb6d8d6e80a75d9033cf5bb5991. after waiting 0 ms 2024-12-15T01:35:32,441 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1734226532420.bf7c67a04da076d05e76ef76f915c1e2. 2024-12-15T01:35:32,441 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,2,1734226532420.d17accb6d8d6e80a75d9033cf5bb5991. 2024-12-15T01:35:32,441 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1734226532420.bf7c67a04da076d05e76ef76f915c1e2. 2024-12-15T01:35:32,441 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion-1,2,1734226532420.d17accb6d8d6e80a75d9033cf5bb5991. 2024-12-15T01:35:32,441 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1635): Region close journal for bf7c67a04da076d05e76ef76f915c1e2: 2024-12-15T01:35:32,441 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1635): Region close journal for d17accb6d8d6e80a75d9033cf5bb5991: 2024-12-15T01:35:32,442 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=110, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_ADD_TO_META 2024-12-15T01:35:32,442 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1734226532420.bf7c67a04da076d05e76ef76f915c1e2.","families":{"info":[{"qualifier":"regioninfo","vlen":84,"tag":[],"timestamp":"1734226532442"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734226532442"}]},"ts":"1734226532442"} 2024-12-15T01:35:32,442 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,2,1734226532420.d17accb6d8d6e80a75d9033cf5bb5991.","families":{"info":[{"qualifier":"regioninfo","vlen":84,"tag":[],"timestamp":"1734226532442"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734226532442"}]},"ts":"1734226532442"} 2024-12-15T01:35:32,444 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 
2024-12-15T01:35:32,445 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=110, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-15T01:35:32,445 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734226532445"}]},"ts":"1734226532445"} 2024-12-15T01:35:32,446 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=ENABLING in hbase:meta 2024-12-15T01:35:32,463 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(202): Hosts are {66d575e28c61=0} racks are {/default-rack=0} 2024-12-15T01:35:32,464 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-15T01:35:32,465 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-15T01:35:32,465 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-15T01:35:32,465 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-15T01:35:32,465 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-15T01:35:32,465 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-15T01:35:32,465 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-15T01:35:32,465 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=111, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=bf7c67a04da076d05e76ef76f915c1e2, ASSIGN}, {pid=112, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=d17accb6d8d6e80a75d9033cf5bb5991, ASSIGN}] 2024-12-15T01:35:32,466 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=112, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=d17accb6d8d6e80a75d9033cf5bb5991, ASSIGN 2024-12-15T01:35:32,466 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=111, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=bf7c67a04da076d05e76ef76f915c1e2, ASSIGN 2024-12-15T01:35:32,467 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=112, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=d17accb6d8d6e80a75d9033cf5bb5991, ASSIGN; state=OFFLINE, location=66d575e28c61,35281,1734226422799; forceNewPlan=false, retain=false 2024-12-15T01:35:32,467 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=111, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=bf7c67a04da076d05e76ef76f915c1e2, ASSIGN; state=OFFLINE, 
location=66d575e28c61,39381,1734226422872; forceNewPlan=false, retain=false 2024-12-15T01:35:32,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-15T01:35:32,618 INFO [66d575e28c61:40789 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-15T01:35:32,619 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=112 updating hbase:meta row=d17accb6d8d6e80a75d9033cf5bb5991, regionState=OPENING, regionLocation=66d575e28c61,35281,1734226422799 2024-12-15T01:35:32,619 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=111 updating hbase:meta row=bf7c67a04da076d05e76ef76f915c1e2, regionState=OPENING, regionLocation=66d575e28c61,39381,1734226422872 2024-12-15T01:35:32,622 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=113, ppid=112, state=RUNNABLE; OpenRegionProcedure d17accb6d8d6e80a75d9033cf5bb5991, server=66d575e28c61,35281,1734226422799}] 2024-12-15T01:35:32,624 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=114, ppid=111, state=RUNNABLE; OpenRegionProcedure bf7c67a04da076d05e76ef76f915c1e2, server=66d575e28c61,39381,1734226422872}] 2024-12-15T01:35:32,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-15T01:35:32,776 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 66d575e28c61,35281,1734226422799 2024-12-15T01:35:32,778 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 66d575e28c61,39381,1734226422872 2024-12-15T01:35:32,780 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemStateWithMergeRegion-1,2,1734226532420.d17accb6d8d6e80a75d9033cf5bb5991. 2024-12-15T01:35:32,781 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(7285): Opening region: {ENCODED => d17accb6d8d6e80a75d9033cf5bb5991, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,2,1734226532420.d17accb6d8d6e80a75d9033cf5bb5991.', STARTKEY => '2', ENDKEY => ''} 2024-12-15T01:35:32,781 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,2,1734226532420.d17accb6d8d6e80a75d9033cf5bb5991. service=AccessControlService 2024-12-15T01:35:32,781 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemStateWithMergeRegion-1,,1734226532420.bf7c67a04da076d05e76ef76f915c1e2. 2024-12-15T01:35:32,782 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(7285): Opening region: {ENCODED => bf7c67a04da076d05e76ef76f915c1e2, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1734226532420.bf7c67a04da076d05e76ef76f915c1e2.', STARTKEY => '', ENDKEY => '2'} 2024-12-15T01:35:32,782 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
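At this point the two new regions are being assigned (pids 111-114) and opened on their region servers. Purely as an illustration of how a client could confirm that assignment has finished and inspect the resulting region boundaries, a small sketch against the HBase 2.x Admin API follows; it is not part of the test that produced this log, and the polling loop is an assumption.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionInfo;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ListRegionsSketch {
      public static void main(String[] args) throws Exception {
        TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Wait until every region of the table is open, i.e. the assignment procedures have finished.
          while (!admin.isTableAvailable(tn)) {
            Thread.sleep(100);
          }
          for (RegionInfo region : admin.getRegions(tn)) {
            System.out.printf("%s [%s, %s)%n",
                region.getEncodedName(),
                Bytes.toStringBinary(region.getStartKey()),
                Bytes.toStringBinary(region.getEndKey()));
          }
        }
      }
    }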
2024-12-15T01:35:32,782 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,,1734226532420.bf7c67a04da076d05e76ef76f915c1e2. service=AccessControlService 2024-12-15T01:35:32,782 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 d17accb6d8d6e80a75d9033cf5bb5991 2024-12-15T01:35:32,782 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,2,1734226532420.d17accb6d8d6e80a75d9033cf5bb5991.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T01:35:32,782 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-15T01:35:32,782 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(7327): checking encryption for d17accb6d8d6e80a75d9033cf5bb5991 2024-12-15T01:35:32,782 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(7330): checking classloading for d17accb6d8d6e80a75d9033cf5bb5991 2024-12-15T01:35:32,782 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 bf7c67a04da076d05e76ef76f915c1e2 2024-12-15T01:35:32,782 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1734226532420.bf7c67a04da076d05e76ef76f915c1e2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T01:35:32,783 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(7327): checking encryption for bf7c67a04da076d05e76ef76f915c1e2 2024-12-15T01:35:32,783 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(7330): checking classloading for bf7c67a04da076d05e76ef76f915c1e2 2024-12-15T01:35:32,785 INFO [StoreOpener-d17accb6d8d6e80a75d9033cf5bb5991-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region d17accb6d8d6e80a75d9033cf5bb5991 2024-12-15T01:35:32,785 INFO [StoreOpener-bf7c67a04da076d05e76ef76f915c1e2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region bf7c67a04da076d05e76ef76f915c1e2 2024-12-15T01:35:32,787 INFO [StoreOpener-d17accb6d8d6e80a75d9033cf5bb5991-1 {}] 
compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d17accb6d8d6e80a75d9033cf5bb5991 columnFamilyName cf 2024-12-15T01:35:32,787 DEBUG [StoreOpener-d17accb6d8d6e80a75d9033cf5bb5991-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T01:35:32,787 INFO [StoreOpener-bf7c67a04da076d05e76ef76f915c1e2-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region bf7c67a04da076d05e76ef76f915c1e2 columnFamilyName cf 2024-12-15T01:35:32,787 DEBUG [StoreOpener-bf7c67a04da076d05e76ef76f915c1e2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T01:35:32,788 INFO [StoreOpener-d17accb6d8d6e80a75d9033cf5bb5991-1 {}] regionserver.HStore(327): Store=d17accb6d8d6e80a75d9033cf5bb5991/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-15T01:35:32,788 INFO [StoreOpener-bf7c67a04da076d05e76ef76f915c1e2-1 {}] regionserver.HStore(327): Store=bf7c67a04da076d05e76ef76f915c1e2/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-15T01:35:32,789 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/bf7c67a04da076d05e76ef76f915c1e2 2024-12-15T01:35:32,789 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d17accb6d8d6e80a75d9033cf5bb5991 2024-12-15T01:35:32,790 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under 
hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/bf7c67a04da076d05e76ef76f915c1e2 2024-12-15T01:35:32,790 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d17accb6d8d6e80a75d9033cf5bb5991 2024-12-15T01:35:32,792 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(1085): writing seq id for d17accb6d8d6e80a75d9033cf5bb5991 2024-12-15T01:35:32,792 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(1085): writing seq id for bf7c67a04da076d05e76ef76f915c1e2 2024-12-15T01:35:32,795 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d17accb6d8d6e80a75d9033cf5bb5991/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-15T01:35:32,795 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/bf7c67a04da076d05e76ef76f915c1e2/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-15T01:35:32,795 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(1102): Opened d17accb6d8d6e80a75d9033cf5bb5991; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62221672, jitterRate=-0.07282483577728271}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-15T01:35:32,795 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(1102): Opened bf7c67a04da076d05e76ef76f915c1e2; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63719661, jitterRate=-0.05050306022167206}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-15T01:35:32,796 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(1001): Region open journal for bf7c67a04da076d05e76ef76f915c1e2: 2024-12-15T01:35:32,796 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(1001): Region open journal for d17accb6d8d6e80a75d9033cf5bb5991: 2024-12-15T01:35:32,796 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,,1734226532420.bf7c67a04da076d05e76ef76f915c1e2., pid=114, masterSystemTime=1734226532778 2024-12-15T01:35:32,796 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegionServer(2601): Post open deploy tasks for 
testtb-testExportFileSystemStateWithMergeRegion-1,2,1734226532420.d17accb6d8d6e80a75d9033cf5bb5991., pid=113, masterSystemTime=1734226532776 2024-12-15T01:35:32,797 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,,1734226532420.bf7c67a04da076d05e76ef76f915c1e2. 2024-12-15T01:35:32,797 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemStateWithMergeRegion-1,,1734226532420.bf7c67a04da076d05e76ef76f915c1e2. 2024-12-15T01:35:32,798 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=111 updating hbase:meta row=bf7c67a04da076d05e76ef76f915c1e2, regionState=OPEN, openSeqNum=2, regionLocation=66d575e28c61,39381,1734226422872 2024-12-15T01:35:32,798 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,2,1734226532420.d17accb6d8d6e80a75d9033cf5bb5991. 2024-12-15T01:35:32,798 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemStateWithMergeRegion-1,2,1734226532420.d17accb6d8d6e80a75d9033cf5bb5991. 2024-12-15T01:35:32,798 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=112 updating hbase:meta row=d17accb6d8d6e80a75d9033cf5bb5991, regionState=OPEN, openSeqNum=2, regionLocation=66d575e28c61,35281,1734226422799 2024-12-15T01:35:32,800 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=114, resume processing ppid=111 2024-12-15T01:35:32,800 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=113, resume processing ppid=112 2024-12-15T01:35:32,801 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=113, ppid=112, state=SUCCESS; OpenRegionProcedure d17accb6d8d6e80a75d9033cf5bb5991, server=66d575e28c61,35281,1734226422799 in 177 msec 2024-12-15T01:35:32,801 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=114, ppid=111, state=SUCCESS; OpenRegionProcedure bf7c67a04da076d05e76ef76f915c1e2, server=66d575e28c61,39381,1734226422872 in 175 msec 2024-12-15T01:35:32,801 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=111, ppid=110, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=bf7c67a04da076d05e76ef76f915c1e2, ASSIGN in 335 msec 2024-12-15T01:35:32,802 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=112, resume processing ppid=110 2024-12-15T01:35:32,802 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=112, ppid=110, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=d17accb6d8d6e80a75d9033cf5bb5991, ASSIGN in 335 msec 2024-12-15T01:35:32,802 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=110, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-15T01:35:32,802 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put 
{"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734226532802"}]},"ts":"1734226532802"} 2024-12-15T01:35:32,803 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=ENABLED in hbase:meta 2024-12-15T01:35:32,813 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=110, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_POST_OPERATION 2024-12-15T01:35:32,814 DEBUG [PEWorker-3 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportFileSystemStateWithMergeRegion-1 jenkins: RWXCA 2024-12-15T01:35:32,816 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35281 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion-1], kv [jenkins: RWXCA] 2024-12-15T01:35:32,821 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39381-0x1002752410c0003, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:35:32,821 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42663-0x1002752410c0001, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:35:32,821 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:35:32,821 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35281-0x1002752410c0002, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:35:32,830 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-15T01:35:32,830 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-15T01:35:32,830 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-15T01:35:32,831 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-15T01:35:32,831 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data 
PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-15T01:35:32,831 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-15T01:35:32,831 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-15T01:35:32,831 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-15T01:35:32,831 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=110, state=SUCCESS; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 409 msec 2024-12-15T01:35:32,883 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion-1' 2024-12-15T01:35:33,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-15T01:35:33,028 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1, procId: 110 completed 2024-12-15T01:35:33,052 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.HMaster$2(2219): Client=jenkins//172.17.0.2 merge regions [bf7c67a04da076d05e76ef76f915c1e2, d17accb6d8d6e80a75d9033cf5bb5991] 2024-12-15T01:35:33,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] procedure2.ProcedureExecutor(1098): Stored pid=115, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[bf7c67a04da076d05e76ef76f915c1e2, d17accb6d8d6e80a75d9033cf5bb5991], force=true 2024-12-15T01:35:33,057 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=115, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[bf7c67a04da076d05e76ef76f915c1e2, d17accb6d8d6e80a75d9033cf5bb5991], force=true 2024-12-15T01:35:33,057 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=115, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[bf7c67a04da076d05e76ef76f915c1e2, d17accb6d8d6e80a75d9033cf5bb5991], force=true 2024-12-15T01:35:33,057 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=115, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[bf7c67a04da076d05e76ef76f915c1e2, d17accb6d8d6e80a75d9033cf5bb5991], force=true 2024-12-15T01:35:33,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] 
master.MasterRpcServices(1305): Checking to see if procedure is done pid=115 2024-12-15T01:35:33,067 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=116, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=bf7c67a04da076d05e76ef76f915c1e2, UNASSIGN}, {pid=117, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=d17accb6d8d6e80a75d9033cf5bb5991, UNASSIGN}] 2024-12-15T01:35:33,068 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=116, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=bf7c67a04da076d05e76ef76f915c1e2, UNASSIGN 2024-12-15T01:35:33,068 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=117, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=d17accb6d8d6e80a75d9033cf5bb5991, UNASSIGN 2024-12-15T01:35:33,068 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=116 updating hbase:meta row=bf7c67a04da076d05e76ef76f915c1e2, regionState=CLOSING, regionLocation=66d575e28c61,39381,1734226422872 2024-12-15T01:35:33,068 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=117 updating hbase:meta row=d17accb6d8d6e80a75d9033cf5bb5991, regionState=CLOSING, regionLocation=66d575e28c61,35281,1734226422799 2024-12-15T01:35:33,070 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-12-15T01:35:33,070 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=118, ppid=116, state=RUNNABLE; CloseRegionProcedure bf7c67a04da076d05e76ef76f915c1e2, server=66d575e28c61,39381,1734226422872}] 2024-12-15T01:35:33,070 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-12-15T01:35:33,070 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=119, ppid=117, state=RUNNABLE; CloseRegionProcedure d17accb6d8d6e80a75d9033cf5bb5991, server=66d575e28c61,35281,1734226422799}] 2024-12-15T01:35:33,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115 2024-12-15T01:35:33,221 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 66d575e28c61,39381,1734226422872 2024-12-15T01:35:33,222 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] handler.UnassignRegionHandler(124): Close bf7c67a04da076d05e76ef76f915c1e2 2024-12-15T01:35:33,223 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 66d575e28c61,35281,1734226422799 2024-12-15T01:35:33,223 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] handler.UnassignRegionHandler(138): Unassign region: split region: true: evictCache: true 2024-12-15T01:35:33,223 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(1681): Closing bf7c67a04da076d05e76ef76f915c1e2, disabling compactions & flushes 2024-12-15T01:35:33,223 INFO 
[RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1734226532420.bf7c67a04da076d05e76ef76f915c1e2. 2024-12-15T01:35:33,223 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1734226532420.bf7c67a04da076d05e76ef76f915c1e2. 2024-12-15T01:35:33,223 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1734226532420.bf7c67a04da076d05e76ef76f915c1e2. after waiting 0 ms 2024-12-15T01:35:33,223 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1734226532420.bf7c67a04da076d05e76ef76f915c1e2. 2024-12-15T01:35:33,223 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] handler.UnassignRegionHandler(124): Close d17accb6d8d6e80a75d9033cf5bb5991 2024-12-15T01:35:33,223 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(2837): Flushing bf7c67a04da076d05e76ef76f915c1e2 1/1 column families, dataSize=24 B heapSize=352 B 2024-12-15T01:35:33,223 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] handler.UnassignRegionHandler(138): Unassign region: split region: true: evictCache: true 2024-12-15T01:35:33,224 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1681): Closing d17accb6d8d6e80a75d9033cf5bb5991, disabling compactions & flushes 2024-12-15T01:35:33,224 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,2,1734226532420.d17accb6d8d6e80a75d9033cf5bb5991. 2024-12-15T01:35:33,224 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1734226532420.d17accb6d8d6e80a75d9033cf5bb5991. 2024-12-15T01:35:33,224 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1734226532420.d17accb6d8d6e80a75d9033cf5bb5991. after waiting 0 ms 2024-12-15T01:35:33,224 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,2,1734226532420.d17accb6d8d6e80a75d9033cf5bb5991. 
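The unassign/close/flush sequence above is the server side of the region merge requested by the client at 01:35:33,052 (MergeTableRegionsProcedure pid=115, force=true). Purely as an illustration, and not part of this log, a minimal client-side sketch that would trigger such a merge could look roughly like the following; the table name is taken from the log, while the class name, configuration source, and region selection are assumptions.

    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionInfo;

    // Illustrative sketch only: requests a forced merge of the table's first two regions.
    public class MergeRegionsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();   // hbase-site.xml assumed on the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1");
          List<RegionInfo> regions = admin.getRegions(tn);   // two regions before the merge in this run
          byte[][] toMerge = new byte[][] {
              regions.get(0).getEncodedNameAsBytes(),
              regions.get(1).getEncodedNameAsBytes() };
          // forcible=true mirrors the force=true flag on the procedure above;
          // get() blocks until the master reports the merge procedure finished.
          admin.mergeRegionsAsync(toMerge, true).get();
        }
      }
    }

Such a call is what produces the UNASSIGN/CloseRegionProcedure pairs (pid=116..119) and the per-region memstore flushes recorded in the entries that follow.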
2024-12-15T01:35:33,224 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(2837): Flushing d17accb6d8d6e80a75d9033cf5bb5991 1/1 column families, dataSize=24 B heapSize=352 B 2024-12-15T01:35:33,245 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d17accb6d8d6e80a75d9033cf5bb5991/.tmp/cf/3f0fe7b8212548edb6857de692041575 is 28, key is 2/cf:/1734226533037/Put/seqid=0 2024-12-15T01:35:33,245 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/bf7c67a04da076d05e76ef76f915c1e2/.tmp/cf/1fe378d21c6a43abab2fb7bf021ab919 is 28, key is 1/cf:/1734226533032/Put/seqid=0 2024-12-15T01:35:33,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742115_1291 (size=4945) 2024-12-15T01:35:33,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742116_1292 (size=4945) 2024-12-15T01:35:33,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742116_1292 (size=4945) 2024-12-15T01:35:33,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742115_1291 (size=4945) 2024-12-15T01:35:33,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742116_1292 (size=4945) 2024-12-15T01:35:33,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742115_1291 (size=4945) 2024-12-15T01:35:33,251 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/bf7c67a04da076d05e76ef76f915c1e2/.tmp/cf/1fe378d21c6a43abab2fb7bf021ab919 2024-12-15T01:35:33,251 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d17accb6d8d6e80a75d9033cf5bb5991/.tmp/cf/3f0fe7b8212548edb6857de692041575 2024-12-15T01:35:33,256 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/bf7c67a04da076d05e76ef76f915c1e2/.tmp/cf/1fe378d21c6a43abab2fb7bf021ab919 as 
hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/bf7c67a04da076d05e76ef76f915c1e2/cf/1fe378d21c6a43abab2fb7bf021ab919 2024-12-15T01:35:33,256 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d17accb6d8d6e80a75d9033cf5bb5991/.tmp/cf/3f0fe7b8212548edb6857de692041575 as hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d17accb6d8d6e80a75d9033cf5bb5991/cf/3f0fe7b8212548edb6857de692041575 2024-12-15T01:35:33,262 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/bf7c67a04da076d05e76ef76f915c1e2/cf/1fe378d21c6a43abab2fb7bf021ab919, entries=1, sequenceid=5, filesize=4.8 K 2024-12-15T01:35:33,262 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d17accb6d8d6e80a75d9033cf5bb5991/cf/3f0fe7b8212548edb6857de692041575, entries=1, sequenceid=5, filesize=4.8 K 2024-12-15T01:35:33,263 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(3040): Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for bf7c67a04da076d05e76ef76f915c1e2 in 40ms, sequenceid=5, compaction requested=false 2024-12-15T01:35:33,263 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(3040): Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for d17accb6d8d6e80a75d9033cf5bb5991 in 39ms, sequenceid=5, compaction requested=false 2024-12-15T01:35:33,266 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/bf7c67a04da076d05e76ef76f915c1e2/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-15T01:35:33,266 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d17accb6d8d6e80a75d9033cf5bb5991/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-15T01:35:33,267 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-15T01:35:33,267 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1734226532420.bf7c67a04da076d05e76ef76f915c1e2. 
2024-12-15T01:35:33,267 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-15T01:35:33,267 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(1635): Region close journal for bf7c67a04da076d05e76ef76f915c1e2: 2024-12-15T01:35:33,267 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion-1,2,1734226532420.d17accb6d8d6e80a75d9033cf5bb5991. 2024-12-15T01:35:33,267 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1635): Region close journal for d17accb6d8d6e80a75d9033cf5bb5991: 2024-12-15T01:35:33,268 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] handler.UnassignRegionHandler(170): Closed d17accb6d8d6e80a75d9033cf5bb5991 2024-12-15T01:35:33,269 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=117 updating hbase:meta row=d17accb6d8d6e80a75d9033cf5bb5991, regionState=CLOSED 2024-12-15T01:35:33,269 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] handler.UnassignRegionHandler(170): Closed bf7c67a04da076d05e76ef76f915c1e2 2024-12-15T01:35:33,269 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=116 updating hbase:meta row=bf7c67a04da076d05e76ef76f915c1e2, regionState=CLOSED 2024-12-15T01:35:33,271 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=119, resume processing ppid=117 2024-12-15T01:35:33,271 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=119, ppid=117, state=SUCCESS; CloseRegionProcedure d17accb6d8d6e80a75d9033cf5bb5991, server=66d575e28c61,35281,1734226422799 in 199 msec 2024-12-15T01:35:33,271 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=118, resume processing ppid=116 2024-12-15T01:35:33,271 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=118, ppid=116, state=SUCCESS; CloseRegionProcedure bf7c67a04da076d05e76ef76f915c1e2, server=66d575e28c61,39381,1734226422872 in 200 msec 2024-12-15T01:35:33,271 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=117, ppid=115, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=d17accb6d8d6e80a75d9033cf5bb5991, UNASSIGN in 204 msec 2024-12-15T01:35:33,272 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=116, resume processing ppid=115 2024-12-15T01:35:33,272 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=116, ppid=115, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=bf7c67a04da076d05e76ef76f915c1e2, UNASSIGN in 204 msec 2024-12-15T01:35:33,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742117_1293 (size=84) 2024-12-15T01:35:33,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742117_1293 (size=84) 2024-12-15T01:35:33,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742117_1293 (size=84) 
2024-12-15T01:35:33,284 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T01:35:33,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742118_1294 (size=20) 2024-12-15T01:35:33,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742118_1294 (size=20) 2024-12-15T01:35:33,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742118_1294 (size=20) 2024-12-15T01:35:33,294 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T01:35:33,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742119_1295 (size=21) 2024-12-15T01:35:33,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742119_1295 (size=21) 2024-12-15T01:35:33,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742119_1295 (size=21) 2024-12-15T01:35:33,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742120_1296 (size=84) 2024-12-15T01:35:33,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742120_1296 (size=84) 2024-12-15T01:35:33,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742120_1296 (size=84) 2024-12-15T01:35:33,306 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T01:35:33,314 DEBUG [PEWorker-3 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/45053ccce96bc6a9f3f1b388ece26995/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=-1 2024-12-15T01:35:33,316 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1734226532420.bf7c67a04da076d05e76ef76f915c1e2.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-12-15T01:35:33,316 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,2,1734226532420.d17accb6d8d6e80a75d9033cf5bb5991.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-12-15T01:35:33,316 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put 
{"totalColumns":7,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1734226532421.45053ccce96bc6a9f3f1b388ece26995.","families":{"info":[{"qualifier":"regioninfo","vlen":83,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"merge0000","vlen":84,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"merge0001","vlen":84,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-12-15T01:35:33,343 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=120, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=45053ccce96bc6a9f3f1b388ece26995, ASSIGN}] 2024-12-15T01:35:33,343 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=120, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=45053ccce96bc6a9f3f1b388ece26995, ASSIGN 2024-12-15T01:35:33,344 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=120, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=45053ccce96bc6a9f3f1b388ece26995, ASSIGN; state=MERGED, location=66d575e28c61,39381,1734226422872; forceNewPlan=false, retain=false 2024-12-15T01:35:33,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115 2024-12-15T01:35:33,416 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734226430009_0005_000001 (auth:SIMPLE) from 127.0.0.1:52636 2024-12-15T01:35:33,426 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/MiniMRCluster_1118476571/yarn-659708074/MiniMRCluster_1118476571-localDir-nm-1_1/usercache/jenkins/appcache/application_1734226430009_0005/container_1734226430009_0005_01_000001/launch_container.sh] 2024-12-15T01:35:33,427 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/MiniMRCluster_1118476571/yarn-659708074/MiniMRCluster_1118476571-localDir-nm-1_1/usercache/jenkins/appcache/application_1734226430009_0005/container_1734226430009_0005_01_000001/container_tokens] 2024-12-15T01:35:33,427 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/MiniMRCluster_1118476571/yarn-659708074/MiniMRCluster_1118476571-localDir-nm-1_1/usercache/jenkins/appcache/application_1734226430009_0005/container_1734226430009_0005_01_000001/sysfs] 2024-12-15T01:35:33,494 INFO [66d575e28c61:40789 {}] balancer.BaseLoadBalancer(546): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-12-15T01:35:33,494 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=120 updating hbase:meta row=45053ccce96bc6a9f3f1b388ece26995, regionState=OPENING, regionLocation=66d575e28c61,39381,1734226422872 2024-12-15T01:35:33,496 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=121, ppid=120, state=RUNNABLE; OpenRegionProcedure 45053ccce96bc6a9f3f1b388ece26995, server=66d575e28c61,39381,1734226422872}] 2024-12-15T01:35:33,648 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 66d575e28c61,39381,1734226422872 2024-12-15T01:35:33,652 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemStateWithMergeRegion-1,,1734226532421.45053ccce96bc6a9f3f1b388ece26995. 2024-12-15T01:35:33,652 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(7285): Opening region: {ENCODED => 45053ccce96bc6a9f3f1b388ece26995, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1734226532421.45053ccce96bc6a9f3f1b388ece26995.', STARTKEY => '', ENDKEY => ''} 2024-12-15T01:35:33,652 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,,1734226532421.45053ccce96bc6a9f3f1b388ece26995. service=AccessControlService 2024-12-15T01:35:33,652 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-15T01:35:33,653 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 45053ccce96bc6a9f3f1b388ece26995 2024-12-15T01:35:33,653 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1734226532421.45053ccce96bc6a9f3f1b388ece26995.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T01:35:33,653 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(7327): checking encryption for 45053ccce96bc6a9f3f1b388ece26995 2024-12-15T01:35:33,653 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(7330): checking classloading for 45053ccce96bc6a9f3f1b388ece26995 2024-12-15T01:35:33,654 INFO [StoreOpener-45053ccce96bc6a9f3f1b388ece26995-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 45053ccce96bc6a9f3f1b388ece26995 2024-12-15T01:35:33,655 INFO [StoreOpener-45053ccce96bc6a9f3f1b388ece26995-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, 
major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 45053ccce96bc6a9f3f1b388ece26995 columnFamilyName cf 2024-12-15T01:35:33,656 DEBUG [StoreOpener-45053ccce96bc6a9f3f1b388ece26995-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T01:35:33,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115 2024-12-15T01:35:33,674 DEBUG [StoreOpener-45053ccce96bc6a9f3f1b388ece26995-1 {}] regionserver.StoreEngine(277): loaded hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/45053ccce96bc6a9f3f1b388ece26995/cf/1fe378d21c6a43abab2fb7bf021ab919.bf7c67a04da076d05e76ef76f915c1e2->hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/bf7c67a04da076d05e76ef76f915c1e2/cf/1fe378d21c6a43abab2fb7bf021ab919-top 2024-12-15T01:35:33,678 DEBUG [StoreOpener-45053ccce96bc6a9f3f1b388ece26995-1 {}] regionserver.StoreEngine(277): loaded hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/45053ccce96bc6a9f3f1b388ece26995/cf/3f0fe7b8212548edb6857de692041575.d17accb6d8d6e80a75d9033cf5bb5991->hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d17accb6d8d6e80a75d9033cf5bb5991/cf/3f0fe7b8212548edb6857de692041575-top 2024-12-15T01:35:33,679 INFO [StoreOpener-45053ccce96bc6a9f3f1b388ece26995-1 {}] regionserver.HStore(327): Store=45053ccce96bc6a9f3f1b388ece26995/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-15T01:35:33,680 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/45053ccce96bc6a9f3f1b388ece26995 2024-12-15T01:35:33,681 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/45053ccce96bc6a9f3f1b388ece26995 2024-12-15T01:35:33,682 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(1085): writing seq id for 45053ccce96bc6a9f3f1b388ece26995 2024-12-15T01:35:33,683 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(1102): Opened 45053ccce96bc6a9f3f1b388ece26995; next sequenceid=9; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, 
ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69122731, jitterRate=0.030008956789970398}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-15T01:35:33,684 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(1001): Region open journal for 45053ccce96bc6a9f3f1b388ece26995: 2024-12-15T01:35:33,684 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,,1734226532421.45053ccce96bc6a9f3f1b388ece26995., pid=121, masterSystemTime=1734226533648 2024-12-15T01:35:33,685 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.CompactSplit(342): Ignoring compaction request for testtb-testExportFileSystemStateWithMergeRegion-1,,1734226532421.45053ccce96bc6a9f3f1b388ece26995.,because compaction is disabled. 2024-12-15T01:35:33,686 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,,1734226532421.45053ccce96bc6a9f3f1b388ece26995. 2024-12-15T01:35:33,686 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemStateWithMergeRegion-1,,1734226532421.45053ccce96bc6a9f3f1b388ece26995. 2024-12-15T01:35:33,686 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=120 updating hbase:meta row=45053ccce96bc6a9f3f1b388ece26995, regionState=OPEN, openSeqNum=9, regionLocation=66d575e28c61,39381,1734226422872 2024-12-15T01:35:33,688 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=121, resume processing ppid=120 2024-12-15T01:35:33,688 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=121, ppid=120, state=SUCCESS; OpenRegionProcedure 45053ccce96bc6a9f3f1b388ece26995, server=66d575e28c61,39381,1734226422872 in 191 msec 2024-12-15T01:35:33,690 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=120, resume processing ppid=115 2024-12-15T01:35:33,690 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=120, ppid=115, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=45053ccce96bc6a9f3f1b388ece26995, ASSIGN in 346 msec 2024-12-15T01:35:33,691 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=115, state=SUCCESS; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[bf7c67a04da076d05e76ef76f915c1e2, d17accb6d8d6e80a75d9033cf5bb5991], force=true in 636 msec 2024-12-15T01:35:34,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115 2024-12-15T01:35:34,164 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: MERGE_REGIONS, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1, procId: 115 completed 2024-12-15T01:35:34,165 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } 2024-12-15T01:35:34,165 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734226534165 (current time:1734226534165). 2024-12-15T01:35:34,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-15T01:35:34,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 VERSION not specified, setting to 2 2024-12-15T01:35:34,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-15T01:35:34,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2e27db1b to 127.0.0.1:64248 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5ba208bc 2024-12-15T01:35:34,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@22242861, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T01:35:34,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T01:35:34,184 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50740, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T01:35:34,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2e27db1b to 127.0.0.1:64248 2024-12-15T01:35:34,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T01:35:34,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4705c891 to 127.0.0.1:64248 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@ca9cf25 2024-12-15T01:35:34,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@f4d89f1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T01:35:34,208 DEBUG [hconnection-0x9521ff2-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T01:35:34,209 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50746, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T01:35:34,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T01:35:34,212 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39900, version=2.7.0-SNAPSHOT, 
sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T01:35:34,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4705c891 to 127.0.0.1:64248 2024-12-15T01:35:34,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T01:35:34,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion-1], kv [jenkins: RWXCA] 2024-12-15T01:35:34,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-15T01:35:34,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] procedure2.ProcedureExecutor(1098): Stored pid=122, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } 2024-12-15T01:35:34,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }, snapshot procedure id = 122 2024-12-15T01:35:34,218 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-15T01:35:34,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-15T01:35:34,219 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-15T01:35:34,222 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-15T01:35:34,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742121_1297 (size=216) 2024-12-15T01:35:34,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742121_1297 (size=216) 2024-12-15T01:35:34,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742121_1297 (size=216) 2024-12-15T01:35:34,231 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ 
ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-15T01:35:34,231 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=123, ppid=122, state=RUNNABLE; SnapshotRegionProcedure 45053ccce96bc6a9f3f1b388ece26995}] 2024-12-15T01:35:34,232 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=123, ppid=122, state=RUNNABLE; SnapshotRegionProcedure 45053ccce96bc6a9f3f1b388ece26995 2024-12-15T01:35:34,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-15T01:35:34,382 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 66d575e28c61,39381,1734226422872 2024-12-15T01:35:34,383 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39381 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=123 2024-12-15T01:35:34,383 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion-1,,1734226532421.45053ccce96bc6a9f3f1b388ece26995. 2024-12-15T01:35:34,384 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegion(2538): Flush status journal for 45053ccce96bc6a9f3f1b388ece26995: 2024-12-15T01:35:34,384 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion-1,,1734226532421.45053ccce96bc6a9f3f1b388ece26995. for snaptb0-testExportFileSystemStateWithMergeRegion-1 completed. 2024-12-15T01:35:34,384 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion-1,,1734226532421.45053ccce96bc6a9f3f1b388ece26995.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-15T01:35:34,384 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-15T01:35:34,384 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/45053ccce96bc6a9f3f1b388ece26995/cf/1fe378d21c6a43abab2fb7bf021ab919.bf7c67a04da076d05e76ef76f915c1e2->hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/bf7c67a04da076d05e76ef76f915c1e2/cf/1fe378d21c6a43abab2fb7bf021ab919-top, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/45053ccce96bc6a9f3f1b388ece26995/cf/3f0fe7b8212548edb6857de692041575.d17accb6d8d6e80a75d9033cf5bb5991->hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d17accb6d8d6e80a75d9033cf5bb5991/cf/3f0fe7b8212548edb6857de692041575-top] hfiles 2024-12-15T01:35:34,384 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(265): Adding reference for file (1/2): hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/45053ccce96bc6a9f3f1b388ece26995/cf/1fe378d21c6a43abab2fb7bf021ab919.bf7c67a04da076d05e76ef76f915c1e2 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-15T01:35:34,385 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(265): Adding reference for file (2/2): hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/45053ccce96bc6a9f3f1b388ece26995/cf/3f0fe7b8212548edb6857de692041575.d17accb6d8d6e80a75d9033cf5bb5991 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-15T01:35:34,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742122_1298 (size=269) 2024-12-15T01:35:34,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742122_1298 (size=269) 2024-12-15T01:35:34,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742122_1298 (size=269) 2024-12-15T01:35:34,393 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion-1,,1734226532421.45053ccce96bc6a9f3f1b388ece26995. 
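The SnapshotRegionProcedure above (pid=123) is the server-side work for the FLUSH snapshot requested at 01:35:34,165, and the ExportSnapshot run logged further below copies that snapshot to a second HDFS location. As an assumed illustration only, the client-side equivalents are roughly as follows; the snapshot and table names come from the log, while the class name, the export destination path, and the simplified argument handling are assumptions.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    // Illustrative sketch only: take a snapshot of the merged table, then export it.
    public class SnapshotAndExportSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Snapshot of the enabled table, as in SnapshotProcedure pid=122 above.
          admin.snapshot("snaptb0-testExportFileSystemStateWithMergeRegion-1",
              TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1"));
        }
        // Export the snapshot to another HDFS directory. The destination here is
        // illustrative; the test below uses an export-test/export-<timestamp> directory.
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testExportFileSystemStateWithMergeRegion-1",
            "-copy-to", "hdfs://localhost:33329/user/jenkins/export-test" });
        System.exit(rc);
      }
    }

The export step launches a MapReduce job, which is why the entries below show the snapshot manifest being copied and TableMapReduceUtil resolving dependency jars for the job classpath.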
2024-12-15T01:35:34,393 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=123 2024-12-15T01:35:34,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.HMaster(4106): Remote procedure done, pid=123 2024-12-15T01:35:34,394 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 on region 45053ccce96bc6a9f3f1b388ece26995 2024-12-15T01:35:34,394 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=123, ppid=122, state=RUNNABLE; SnapshotRegionProcedure 45053ccce96bc6a9f3f1b388ece26995 2024-12-15T01:35:34,397 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=123, resume processing ppid=122 2024-12-15T01:35:34,397 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-15T01:35:34,397 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=123, ppid=122, state=SUCCESS; SnapshotRegionProcedure 45053ccce96bc6a9f3f1b388ece26995 in 164 msec 2024-12-15T01:35:34,398 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-15T01:35:34,398 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-15T01:35:34,398 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-15T01:35:34,399 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-15T01:35:34,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742123_1299 (size=670) 2024-12-15T01:35:34,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742123_1299 (size=670) 2024-12-15T01:35:34,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742123_1299 (size=670) 2024-12-15T01:35:34,411 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ 
ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-15T01:35:34,418 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-15T01:35:34,418 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 to hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-15T01:35:34,419 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-15T01:35:34,420 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }, snapshot procedure id = 122 2024-12-15T01:35:34,421 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=122, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } in 204 msec 2024-12-15T01:35:34,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-15T01:35:34,521 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1, procId: 122 completed 2024-12-15T01:35:34,521 INFO [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226534521 2024-12-15T01:35:34,521 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:33329, tgtDir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226534521, rawTgtDir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226534521, srcFsUri=hdfs://localhost:33329, srcDir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b 2024-12-15T01:35:34,546 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:33329, inputRoot=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b 2024-12-15T01:35:34,546 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-778768832_22, ugi=jenkins (auth:SIMPLE)]], 
outputRoot=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226534521, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226534521/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-15T01:35:34,548 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 2024-12-15T01:35:34,553 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 to hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226534521/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-15T01:35:34,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742124_1300 (size=216) 2024-12-15T01:35:34,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742124_1300 (size=216) 2024-12-15T01:35:34,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742124_1300 (size=216) 2024-12-15T01:35:34,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742125_1301 (size=670) 2024-12-15T01:35:34,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742125_1301 (size=670) 2024-12-15T01:35:34,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742125_1301 (size=670) 2024-12-15T01:35:34,713 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/hadoop-11728948678310763575.jar 2024-12-15T01:35:34,713 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-15T01:35:34,713 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-15T01:35:34,714 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-15T01:35:35,016 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-15T01:35:35,553 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/hadoop-10248206215596693342.jar 2024-12-15T01:35:35,553 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-15T01:35:35,553 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-15T01:35:35,607 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/hadoop-9733653593528727390.jar 2024-12-15T01:35:35,608 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-15T01:35:35,608 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-15T01:35:35,608 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-15T01:35:35,608 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-15T01:35:35,609 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-15T01:35:35,609 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-15T01:35:35,609 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-15T01:35:35,609 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-15T01:35:35,609 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, 
using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-15T01:35:35,610 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-15T01:35:35,610 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-15T01:35:35,610 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-15T01:35:35,610 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-15T01:35:35,610 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-15T01:35:35,610 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-15T01:35:35,611 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-15T01:35:35,611 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-15T01:35:35,611 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-15T01:35:35,611 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-15T01:35:35,611 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-15T01:35:35,612 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class 
org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-15T01:35:35,612 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-15T01:35:35,612 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-15T01:35:35,612 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-15T01:35:35,612 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-15T01:35:35,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742126_1302 (size=127628) 2024-12-15T01:35:35,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742126_1302 (size=127628) 2024-12-15T01:35:35,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742126_1302 (size=127628) 2024-12-15T01:35:35,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742127_1303 (size=2172137) 2024-12-15T01:35:35,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742127_1303 (size=2172137) 2024-12-15T01:35:35,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742127_1303 (size=2172137) 2024-12-15T01:35:35,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742128_1304 (size=213228) 2024-12-15T01:35:35,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742128_1304 (size=213228) 2024-12-15T01:35:35,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742128_1304 (size=213228) 2024-12-15T01:35:35,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742129_1305 (size=1877034) 2024-12-15T01:35:35,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742129_1305 (size=1877034) 2024-12-15T01:35:35,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:38403 is added to blk_1073742129_1305 (size=1877034) 2024-12-15T01:35:35,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742130_1306 (size=533455) 2024-12-15T01:35:35,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742130_1306 (size=533455) 2024-12-15T01:35:35,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742130_1306 (size=533455) 2024-12-15T01:35:35,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742131_1307 (size=7280644) 2024-12-15T01:35:35,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742131_1307 (size=7280644) 2024-12-15T01:35:35,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742131_1307 (size=7280644) 2024-12-15T01:35:35,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742132_1308 (size=4188619) 2024-12-15T01:35:35,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742132_1308 (size=4188619) 2024-12-15T01:35:35,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742132_1308 (size=4188619) 2024-12-15T01:35:35,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742133_1309 (size=20406) 2024-12-15T01:35:35,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742133_1309 (size=20406) 2024-12-15T01:35:35,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742133_1309 (size=20406) 2024-12-15T01:35:35,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742134_1310 (size=75495) 2024-12-15T01:35:35,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742134_1310 (size=75495) 2024-12-15T01:35:35,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742134_1310 (size=75495) 2024-12-15T01:35:35,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742135_1311 (size=45609) 2024-12-15T01:35:35,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742135_1311 (size=45609) 2024-12-15T01:35:35,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742135_1311 (size=45609) 2024-12-15T01:35:35,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742136_1312 (size=110084) 2024-12-15T01:35:35,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742136_1312 (size=110084) 2024-12-15T01:35:35,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742136_1312 (size=110084) 2024-12-15T01:35:35,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742137_1313 (size=451756) 2024-12-15T01:35:35,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742137_1313 (size=451756) 2024-12-15T01:35:35,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742137_1313 (size=451756) 2024-12-15T01:35:35,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742138_1314 (size=1323991) 2024-12-15T01:35:35,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742138_1314 (size=1323991) 2024-12-15T01:35:35,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742138_1314 (size=1323991) 2024-12-15T01:35:35,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742139_1315 (size=23076) 2024-12-15T01:35:35,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742139_1315 (size=23076) 2024-12-15T01:35:35,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742139_1315 (size=23076) 2024-12-15T01:35:35,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742140_1316 (size=126803) 2024-12-15T01:35:35,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742140_1316 (size=126803) 2024-12-15T01:35:35,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742140_1316 (size=126803) 2024-12-15T01:35:35,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742141_1317 (size=322274) 2024-12-15T01:35:35,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742141_1317 (size=322274) 2024-12-15T01:35:35,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742141_1317 (size=322274) 2024-12-15T01:35:35,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742142_1318 (size=1832290) 2024-12-15T01:35:35,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742142_1318 (size=1832290) 2024-12-15T01:35:35,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742142_1318 (size=1832290) 2024-12-15T01:35:35,832 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742143_1319 (size=30081) 2024-12-15T01:35:35,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742143_1319 (size=30081) 2024-12-15T01:35:35,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742143_1319 (size=30081) 2024-12-15T01:35:35,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742144_1320 (size=53616) 2024-12-15T01:35:35,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742144_1320 (size=53616) 2024-12-15T01:35:35,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742144_1320 (size=53616) 2024-12-15T01:35:35,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742145_1321 (size=29229) 2024-12-15T01:35:35,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742145_1321 (size=29229) 2024-12-15T01:35:35,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742145_1321 (size=29229) 2024-12-15T01:35:35,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742146_1322 (size=169089) 2024-12-15T01:35:35,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742146_1322 (size=169089) 2024-12-15T01:35:35,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742146_1322 (size=169089) 2024-12-15T01:35:35,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742147_1323 (size=5175431) 2024-12-15T01:35:35,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742147_1323 (size=5175431) 2024-12-15T01:35:35,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742147_1323 (size=5175431) 2024-12-15T01:35:35,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742148_1324 (size=136454) 2024-12-15T01:35:35,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742148_1324 (size=136454) 2024-12-15T01:35:35,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742148_1324 (size=136454) 2024-12-15T01:35:35,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742149_1325 (size=6350909) 2024-12-15T01:35:35,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742149_1325 (size=6350909) 2024-12-15T01:35:35,896 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742149_1325 (size=6350909) 2024-12-15T01:35:35,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742150_1326 (size=3317408) 2024-12-15T01:35:35,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742150_1326 (size=3317408) 2024-12-15T01:35:35,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742150_1326 (size=3317408) 2024-12-15T01:35:35,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742151_1327 (size=503880) 2024-12-15T01:35:35,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742151_1327 (size=503880) 2024-12-15T01:35:35,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742151_1327 (size=503880) 2024-12-15T01:35:35,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742152_1328 (size=912094) 2024-12-15T01:35:35,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742152_1328 (size=912094) 2024-12-15T01:35:35,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742152_1328 (size=912094) 2024-12-15T01:35:35,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742153_1329 (size=4695811) 2024-12-15T01:35:35,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742153_1329 (size=4695811) 2024-12-15T01:35:35,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742153_1329 (size=4695811) 2024-12-15T01:35:35,939 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
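The `TableMapReduceUtil(923)` DEBUG lines above record the job setup resolving, for each required class, the jar that provides it so the jar can be shipped with the MapReduce job; the `JobResourceUploader` WARN notes that no job jar was set for user classes. A minimal sketch of that setup step follows — it is not the test's code, the class and job names are placeholders, and only `TableMapReduceUtil.addDependencyJars` and the standard `Job` calls are the real APIs involved.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class DependencyJarsExample {
  public static Job createJob() throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "snapshot-export-like-job");
    // Setting the job jar avoids the "No job jar file set" warning from
    // JobResourceUploader seen above.
    job.setJarByClass(DependencyJarsExample.class);
    // Resolves each dependency class to the jar that provides it and attaches
    // those jars to the job -- the "For class X, using jar Y" DEBUG lines
    // above come from this resolution step.
    TableMapReduceUtil.addDependencyJars(job);
    return job;
  }
}
```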
2024-12-15T01:35:35,940 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testExportFileSystemStateWithMergeRegion-1' hfile list 2024-12-15T01:35:35,942 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=9.7 K 2024-12-15T01:35:35,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742154_1330 (size=378) 2024-12-15T01:35:35,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742154_1330 (size=378) 2024-12-15T01:35:35,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742154_1330 (size=378) 2024-12-15T01:35:35,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742155_1331 (size=15) 2024-12-15T01:35:35,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742155_1331 (size=15) 2024-12-15T01:35:35,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742155_1331 (size=15) 2024-12-15T01:35:35,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742156_1332 (size=304992) 2024-12-15T01:35:35,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742156_1332 (size=304992) 2024-12-15T01:35:35,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742156_1332 (size=304992) 2024-12-15T01:35:35,985 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-15T01:35:35,986 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-15T01:35:36,322 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734226430009_0006_000001 (auth:SIMPLE) from 127.0.0.1:51022 2024-12-15T01:35:40,818 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
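At this point the export has computed its single split and a YARN application (`application_1734226430009_0006`) is being launched to copy the snapshot files. A minimal sketch of how such an export is typically started, outside the test harness: the snapshot name, destination path, and mapper count below are placeholders, and the option spelling may vary slightly between HBase versions, but `ExportSnapshot` is a standard Hadoop `Tool` and can be driven via `ToolRunner` as shown.

```java
// Shell form (placeholders):
//   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
//       --snapshot snaptb0-example --copy-to hdfs://dest:8020/hbase --mappers 4
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class RunExportSnapshot {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Launches the same copy job recorded in this log, against placeholder paths.
    int exitCode = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "--snapshot", "snaptb0-example",        // snapshot to export (placeholder)
        "--copy-to", "hdfs://dest:8020/hbase",  // destination root (placeholder)
        "--mappers", "4"                        // parallel copy tasks
    });
    System.exit(exitCode);
  }
}
```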
2024-12-15T01:35:41,480 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734226430009_0006_000001 (auth:SIMPLE) from 127.0.0.1:34440 2024-12-15T01:35:41,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742157_1333 (size=350666) 2024-12-15T01:35:41,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742157_1333 (size=350666) 2024-12-15T01:35:41,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742157_1333 (size=350666) 2024-12-15T01:35:42,293 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-15T01:35:42,293 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 Metrics about Tables on a single HBase RegionServer 2024-12-15T01:35:43,750 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734226430009_0006_000001 (auth:SIMPLE) from 127.0.0.1:51660 2024-12-15T01:35:45,658 DEBUG [master/66d575e28c61:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 9b718fb94e54ecba6d4eaf293307b363 changed from -1.0 to 0.0, refreshing cache 2024-12-15T01:35:45,658 DEBUG [master/66d575e28c61:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 7776d3792f58052a078812d95f884c33 changed from -1.0 to 0.0, refreshing cache 2024-12-15T01:35:46,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742158_1334 (size=4945) 2024-12-15T01:35:46,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742158_1334 (size=4945) 2024-12-15T01:35:46,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742158_1334 (size=4945) 2024-12-15T01:35:46,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742159_1335 (size=4945) 2024-12-15T01:35:46,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742159_1335 (size=4945) 2024-12-15T01:35:46,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742159_1335 (size=4945) 2024-12-15T01:35:46,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742160_1336 (size=17474) 2024-12-15T01:35:46,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742160_1336 (size=17474) 2024-12-15T01:35:46,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742160_1336 (size=17474) 2024-12-15T01:35:46,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742161_1337 
(size=482) 2024-12-15T01:35:46,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742161_1337 (size=482) 2024-12-15T01:35:46,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742161_1337 (size=482) 2024-12-15T01:35:46,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742162_1338 (size=17474) 2024-12-15T01:35:46,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742162_1338 (size=17474) 2024-12-15T01:35:46,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742162_1338 (size=17474) 2024-12-15T01:35:46,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742163_1339 (size=350666) 2024-12-15T01:35:46,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742163_1339 (size=350666) 2024-12-15T01:35:46,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742163_1339 (size=350666) 2024-12-15T01:35:46,506 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734226430009_0006_000001 (auth:SIMPLE) from 127.0.0.1:51676 2024-12-15T01:35:46,522 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/MiniMRCluster_1118476571/yarn-659708074/MiniMRCluster_1118476571-localDir-nm-0_1/usercache/jenkins/appcache/application_1734226430009_0006/container_1734226430009_0006_01_000002/launch_container.sh] 2024-12-15T01:35:46,522 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/MiniMRCluster_1118476571/yarn-659708074/MiniMRCluster_1118476571-localDir-nm-0_1/usercache/jenkins/appcache/application_1734226430009_0006/container_1734226430009_0006_01_000002/container_tokens] 2024-12-15T01:35:46,522 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/MiniMRCluster_1118476571/yarn-659708074/MiniMRCluster_1118476571-localDir-nm-0_1/usercache/jenkins/appcache/application_1734226430009_0006/container_1734226430009_0006_01_000002/sysfs] 2024-12-15T01:35:48,325 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-15T01:35:48,326 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 
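The export is now being finalized and verified, and the entries that follow list the files under both the source and the exported snapshot directories (`.snapshotinfo` and `data.manifest`). A minimal sketch of such a listing with the Hadoop FileSystem API — the export root below is a placeholder, not the path from this run:

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

public class ListExportedSnapshot {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder export destination; the test uses its own export-<timestamp> dir.
    Path exportRoot =
        new Path("hdfs://dest:8020/export-test/.hbase-snapshot/snaptb0-example");
    FileSystem fs = FileSystem.get(URI.create(exportRoot.toString()), conf);
    // Recursively list every file under the exported snapshot directory.
    RemoteIterator<LocatedFileStatus> it = fs.listFiles(exportRoot, true);
    while (it.hasNext()) {
      LocatedFileStatus status = it.next();
      System.out.println(status.getPath() + " (" + status.getLen() + " bytes)");
    }
  }
}
```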
2024-12-15T01:35:48,337 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-15T01:35:48,338 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-15T01:35:48,338 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-15T01:35:48,338 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-778768832_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 at hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-15T01:35:48,339 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/.snapshotinfo 2024-12-15T01:35:48,339 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/data.manifest 2024-12-15T01:35:48,339 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-778768832_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226534521/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 at hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226534521/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-15T01:35:48,340 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226534521/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/.snapshotinfo 2024-12-15T01:35:48,340 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226534521/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/data.manifest 2024-12-15T01:35:48,348 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-15T01:35:48,349 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-15T01:35:48,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] procedure2.ProcedureExecutor(1098): Stored pid=124, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-15T01:35:48,352 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734226548352"}]},"ts":"1734226548352"} 2024-12-15T01:35:48,355 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=DISABLING in 
hbase:meta 2024-12-15T01:35:48,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-15T01:35:48,412 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithMergeRegion-1 to state=DISABLING 2024-12-15T01:35:48,413 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=125, ppid=124, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1}] 2024-12-15T01:35:48,414 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=126, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=45053ccce96bc6a9f3f1b388ece26995, UNASSIGN}] 2024-12-15T01:35:48,415 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=126, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=45053ccce96bc6a9f3f1b388ece26995, UNASSIGN 2024-12-15T01:35:48,415 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=126 updating hbase:meta row=45053ccce96bc6a9f3f1b388ece26995, regionState=CLOSING, regionLocation=66d575e28c61,39381,1734226422872 2024-12-15T01:35:48,417 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-15T01:35:48,417 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=127, ppid=126, state=RUNNABLE; CloseRegionProcedure 45053ccce96bc6a9f3f1b388ece26995, server=66d575e28c61,39381,1734226422872}] 2024-12-15T01:35:48,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-15T01:35:48,568 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 66d575e28c61,39381,1734226422872 2024-12-15T01:35:48,569 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] handler.UnassignRegionHandler(124): Close 45053ccce96bc6a9f3f1b388ece26995 2024-12-15T01:35:48,569 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-15T01:35:48,569 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1681): Closing 45053ccce96bc6a9f3f1b388ece26995, disabling compactions & flushes 2024-12-15T01:35:48,569 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1734226532421.45053ccce96bc6a9f3f1b388ece26995. 2024-12-15T01:35:48,569 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1734226532421.45053ccce96bc6a9f3f1b388ece26995. 
2024-12-15T01:35:48,569 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1734226532421.45053ccce96bc6a9f3f1b388ece26995. after waiting 0 ms 2024-12-15T01:35:48,569 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1734226532421.45053ccce96bc6a9f3f1b388ece26995. 2024-12-15T01:35:48,574 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/45053ccce96bc6a9f3f1b388ece26995/recovered.edits/12.seqid, newMaxSeqId=12, maxSeqId=8 2024-12-15T01:35:48,575 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-15T01:35:48,575 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1734226532421.45053ccce96bc6a9f3f1b388ece26995. 2024-12-15T01:35:48,575 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1635): Region close journal for 45053ccce96bc6a9f3f1b388ece26995: 2024-12-15T01:35:48,577 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] handler.UnassignRegionHandler(170): Closed 45053ccce96bc6a9f3f1b388ece26995 2024-12-15T01:35:48,578 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=126 updating hbase:meta row=45053ccce96bc6a9f3f1b388ece26995, regionState=CLOSED 2024-12-15T01:35:48,581 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=127, resume processing ppid=126 2024-12-15T01:35:48,582 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=127, ppid=126, state=SUCCESS; CloseRegionProcedure 45053ccce96bc6a9f3f1b388ece26995, server=66d575e28c61,39381,1734226422872 in 162 msec 2024-12-15T01:35:48,582 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=126, resume processing ppid=125 2024-12-15T01:35:48,583 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=126, ppid=125, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=45053ccce96bc6a9f3f1b388ece26995, UNASSIGN in 167 msec 2024-12-15T01:35:48,584 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=125, resume processing ppid=124 2024-12-15T01:35:48,584 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=125, ppid=124, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 170 msec 2024-12-15T01:35:48,586 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734226548585"}]},"ts":"1734226548585"} 2024-12-15T01:35:48,587 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=DISABLED in 
hbase:meta 2024-12-15T01:35:48,628 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithMergeRegion-1 to state=DISABLED 2024-12-15T01:35:48,630 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=124, state=SUCCESS; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 280 msec 2024-12-15T01:35:48,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-15T01:35:48,658 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1, procId: 124 completed 2024-12-15T01:35:48,659 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-15T01:35:48,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] procedure2.ProcedureExecutor(1098): Stored pid=128, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-15T01:35:48,661 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=128, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-15T01:35:48,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-15T01:35:48,661 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=128, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-15T01:35:48,663 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35281 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-15T01:35:48,666 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d17accb6d8d6e80a75d9033cf5bb5991 2024-12-15T01:35:48,666 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/bf7c67a04da076d05e76ef76f915c1e2 2024-12-15T01:35:48,666 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/45053ccce96bc6a9f3f1b388ece26995 2024-12-15T01:35:48,668 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/bf7c67a04da076d05e76ef76f915c1e2/cf, FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/bf7c67a04da076d05e76ef76f915c1e2/recovered.edits] 2024-12-15T01:35:48,668 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(161): Archiving [FileablePath, 
hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d17accb6d8d6e80a75d9033cf5bb5991/cf, FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d17accb6d8d6e80a75d9033cf5bb5991/recovered.edits] 2024-12-15T01:35:48,668 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/45053ccce96bc6a9f3f1b388ece26995/cf, FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/45053ccce96bc6a9f3f1b388ece26995/recovered.edits] 2024-12-15T01:35:48,673 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d17accb6d8d6e80a75d9033cf5bb5991/cf/3f0fe7b8212548edb6857de692041575 to hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d17accb6d8d6e80a75d9033cf5bb5991/cf/3f0fe7b8212548edb6857de692041575 2024-12-15T01:35:48,674 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/bf7c67a04da076d05e76ef76f915c1e2/cf/1fe378d21c6a43abab2fb7bf021ab919 to hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/bf7c67a04da076d05e76ef76f915c1e2/cf/1fe378d21c6a43abab2fb7bf021ab919 2024-12-15T01:35:48,675 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/45053ccce96bc6a9f3f1b388ece26995/cf/1fe378d21c6a43abab2fb7bf021ab919.bf7c67a04da076d05e76ef76f915c1e2 to hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/45053ccce96bc6a9f3f1b388ece26995/cf/1fe378d21c6a43abab2fb7bf021ab919.bf7c67a04da076d05e76ef76f915c1e2 2024-12-15T01:35:48,675 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/45053ccce96bc6a9f3f1b388ece26995/cf/3f0fe7b8212548edb6857de692041575.d17accb6d8d6e80a75d9033cf5bb5991 to hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/45053ccce96bc6a9f3f1b388ece26995/cf/3f0fe7b8212548edb6857de692041575.d17accb6d8d6e80a75d9033cf5bb5991 2024-12-15T01:35:48,679 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d17accb6d8d6e80a75d9033cf5bb5991/recovered.edits/8.seqid to 
hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d17accb6d8d6e80a75d9033cf5bb5991/recovered.edits/8.seqid 2024-12-15T01:35:48,679 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d17accb6d8d6e80a75d9033cf5bb5991 2024-12-15T01:35:48,680 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/bf7c67a04da076d05e76ef76f915c1e2/recovered.edits/8.seqid to hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/bf7c67a04da076d05e76ef76f915c1e2/recovered.edits/8.seqid 2024-12-15T01:35:48,680 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/45053ccce96bc6a9f3f1b388ece26995/recovered.edits/12.seqid to hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/45053ccce96bc6a9f3f1b388ece26995/recovered.edits/12.seqid 2024-12-15T01:35:48,680 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/bf7c67a04da076d05e76ef76f915c1e2 2024-12-15T01:35:48,681 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/45053ccce96bc6a9f3f1b388ece26995 2024-12-15T01:35:48,681 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithMergeRegion-1 regions 2024-12-15T01:35:48,683 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=128, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-15T01:35:48,685 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of testtb-testExportFileSystemStateWithMergeRegion-1 from hbase:meta 2024-12-15T01:35:48,687 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39381-0x1002752410c0003, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-15T01:35:48,687 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-15T01:35:48,687 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42663-0x1002752410c0001, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, 
path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-15T01:35:48,687 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35281-0x1002752410c0002, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-15T01:35:48,688 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportFileSystemStateWithMergeRegion-1' descriptor. 2024-12-15T01:35:48,688 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-12-15T01:35:48,688 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-12-15T01:35:48,688 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-12-15T01:35:48,688 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-12-15T01:35:48,689 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=128, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-15T01:35:48,689 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportFileSystemStateWithMergeRegion-1' from region states. 2024-12-15T01:35:48,689 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1734226532421.45053ccce96bc6a9f3f1b388ece26995.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734226548689"}]},"ts":"9223372036854775807"} 2024-12-15T01:35:48,691 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-15T01:35:48,691 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 45053ccce96bc6a9f3f1b388ece26995, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1734226532421.45053ccce96bc6a9f3f1b388ece26995.', STARTKEY => '', ENDKEY => ''}] 2024-12-15T01:35:48,692 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportFileSystemStateWithMergeRegion-1' as deleted. 
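The procedures above (pid=124 DisableTableProcedure, pid=128 DeleteTableProcedure) are the master-side work behind a plain client disable-then-delete. A minimal client-side sketch of that sequence — the table name is a placeholder, and only the standard `Admin` calls are shown:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableAndDeleteTable {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("example-table"); // placeholder
    try (Connection conn =
             ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      if (!admin.isTableDisabled(table)) {
        admin.disableTable(table); // master runs a DisableTableProcedure
      }
      // Master runs a DeleteTableProcedure: regions are archived, meta rows
      // and the table descriptor removed, ACL znodes cleaned up, as logged above.
      admin.deleteTable(table);
    }
  }
}
```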
2024-12-15T01:35:48,692 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734226548692"}]},"ts":"9223372036854775807"} 2024-12-15T01:35:48,694 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportFileSystemStateWithMergeRegion-1 state from META 2024-12-15T01:35:48,695 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-15T01:35:48,695 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35281-0x1002752410c0002, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-15T01:35:48,695 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42663-0x1002752410c0001, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-15T01:35:48,695 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35281-0x1002752410c0002, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:35:48,695 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:35:48,695 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42663-0x1002752410c0001, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:35:48,695 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39381-0x1002752410c0003, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-15T01:35:48,696 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39381-0x1002752410c0003, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:35:48,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-15T01:35:48,704 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-15T01:35:48,704 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-15T01:35:48,704 DEBUG [zk-permission-watcher-pool-0 {}] 
access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-15T01:35:48,705 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-15T01:35:48,705 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=128, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-15T01:35:48,706 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=128, state=SUCCESS; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 46 msec 2024-12-15T01:35:48,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-15T01:35:48,797 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1, procId: 128 completed 2024-12-15T01:35:48,798 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportFileSystemStateWithMergeRegion 2024-12-15T01:35:48,798 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithMergeRegion 2024-12-15T01:35:48,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] procedure2.ProcedureExecutor(1098): Stored pid=129, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-15T01:35:48,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=129 2024-12-15T01:35:48,801 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734226548800"}]},"ts":"1734226548800"} 2024-12-15T01:35:48,802 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=DISABLING in hbase:meta 2024-12-15T01:35:48,834 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithMergeRegion to state=DISABLING 2024-12-15T01:35:48,835 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=130, ppid=129, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion}] 2024-12-15T01:35:48,837 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=131, ppid=130, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=7776d3792f58052a078812d95f884c33, UNASSIGN}, {pid=132, ppid=130, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=9b718fb94e54ecba6d4eaf293307b363, UNASSIGN}] 2024-12-15T01:35:48,838 INFO 
[PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=132, ppid=130, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=9b718fb94e54ecba6d4eaf293307b363, UNASSIGN 2024-12-15T01:35:48,839 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=131, ppid=130, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=7776d3792f58052a078812d95f884c33, UNASSIGN 2024-12-15T01:35:48,840 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=131 updating hbase:meta row=7776d3792f58052a078812d95f884c33, regionState=CLOSING, regionLocation=66d575e28c61,42663,1734226422715 2024-12-15T01:35:48,841 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=132 updating hbase:meta row=9b718fb94e54ecba6d4eaf293307b363, regionState=CLOSING, regionLocation=66d575e28c61,35281,1734226422799 2024-12-15T01:35:48,842 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-15T01:35:48,842 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=133, ppid=131, state=RUNNABLE; CloseRegionProcedure 7776d3792f58052a078812d95f884c33, server=66d575e28c61,42663,1734226422715}] 2024-12-15T01:35:48,844 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-15T01:35:48,844 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=134, ppid=132, state=RUNNABLE; CloseRegionProcedure 9b718fb94e54ecba6d4eaf293307b363, server=66d575e28c61,35281,1734226422799}] 2024-12-15T01:35:48,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=129 2024-12-15T01:35:48,995 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 66d575e28c61,42663,1734226422715 2024-12-15T01:35:48,995 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] handler.UnassignRegionHandler(124): Close 7776d3792f58052a078812d95f884c33 2024-12-15T01:35:48,995 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-15T01:35:48,996 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1681): Closing 7776d3792f58052a078812d95f884c33, disabling compactions & flushes 2024-12-15T01:35:48,996 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion,,1734226530508.7776d3792f58052a078812d95f884c33. 2024-12-15T01:35:48,996 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion,,1734226530508.7776d3792f58052a078812d95f884c33. 2024-12-15T01:35:48,996 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,,1734226530508.7776d3792f58052a078812d95f884c33. 
after waiting 0 ms 2024-12-15T01:35:48,996 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,,1734226530508.7776d3792f58052a078812d95f884c33. 2024-12-15T01:35:48,996 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 66d575e28c61,35281,1734226422799 2024-12-15T01:35:48,996 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] handler.UnassignRegionHandler(124): Close 9b718fb94e54ecba6d4eaf293307b363 2024-12-15T01:35:48,996 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-15T01:35:48,996 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] regionserver.HRegion(1681): Closing 9b718fb94e54ecba6d4eaf293307b363, disabling compactions & flushes 2024-12-15T01:35:48,997 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion,1,1734226530508.9b718fb94e54ecba6d4eaf293307b363. 2024-12-15T01:35:48,997 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1734226530508.9b718fb94e54ecba6d4eaf293307b363. 2024-12-15T01:35:48,997 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1734226530508.9b718fb94e54ecba6d4eaf293307b363. after waiting 0 ms 2024-12-15T01:35:48,997 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,1,1734226530508.9b718fb94e54ecba6d4eaf293307b363. 
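The unassign/close activity above is the server side of a table disable; the client side of the sequence this log walks through (disable the table, delete it, then drop its snapshots) corresponds to HBase Admin calls roughly like the sketch below. This is an illustrative sketch only: the configuration source, the try-with-resources structure, and the ordering guard are assumptions, while the table and snapshot names are copied from the surrounding log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableAndSnapshots {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();   // picks up hbase-site.xml from the classpath
    TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      if (admin.isTableEnabled(table)) {
        admin.disableTable(table);   // master runs a DisableTableProcedure; regions are closed as above
      }
      admin.deleteTable(table);      // master runs a DeleteTableProcedure; store files are archived
      // Repeat for the other snapshot names shown later in the log.
      admin.deleteSnapshot("snaptb0-testExportFileSystemStateWithMergeRegion");
    }
  }
}

disableTable and deleteTable block until the corresponding master procedure finishes, which is what the repeated "Checking to see if procedure is done pid=..." lines from MasterRpcServices and the HBaseAdmin$TableFuture "completed" lines in this log reflect.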
2024-12-15T01:35:49,000 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion/9b718fb94e54ecba6d4eaf293307b363/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-15T01:35:49,000 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion/7776d3792f58052a078812d95f884c33/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-15T01:35:49,000 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-15T01:35:49,000 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-15T01:35:49,000 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion,1,1734226530508.9b718fb94e54ecba6d4eaf293307b363. 2024-12-15T01:35:49,000 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion,,1734226530508.7776d3792f58052a078812d95f884c33. 2024-12-15T01:35:49,000 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] regionserver.HRegion(1635): Region close journal for 9b718fb94e54ecba6d4eaf293307b363: 2024-12-15T01:35:49,000 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1635): Region close journal for 7776d3792f58052a078812d95f884c33: 2024-12-15T01:35:49,002 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] handler.UnassignRegionHandler(170): Closed 9b718fb94e54ecba6d4eaf293307b363 2024-12-15T01:35:49,003 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=132 updating hbase:meta row=9b718fb94e54ecba6d4eaf293307b363, regionState=CLOSED 2024-12-15T01:35:49,003 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] handler.UnassignRegionHandler(170): Closed 7776d3792f58052a078812d95f884c33 2024-12-15T01:35:49,003 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=131 updating hbase:meta row=7776d3792f58052a078812d95f884c33, regionState=CLOSED 2024-12-15T01:35:49,005 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=134, resume processing ppid=132 2024-12-15T01:35:49,005 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=133, resume processing ppid=131 2024-12-15T01:35:49,006 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=134, ppid=132, state=SUCCESS; CloseRegionProcedure 9b718fb94e54ecba6d4eaf293307b363, server=66d575e28c61,35281,1734226422799 in 160 msec 2024-12-15T01:35:49,006 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=133, ppid=131, state=SUCCESS; CloseRegionProcedure 7776d3792f58052a078812d95f884c33, 
server=66d575e28c61,42663,1734226422715 in 162 msec 2024-12-15T01:35:49,006 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=132, ppid=130, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=9b718fb94e54ecba6d4eaf293307b363, UNASSIGN in 168 msec 2024-12-15T01:35:49,006 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=131, resume processing ppid=130 2024-12-15T01:35:49,007 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=131, ppid=130, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=7776d3792f58052a078812d95f884c33, UNASSIGN in 169 msec 2024-12-15T01:35:49,008 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=130, resume processing ppid=129 2024-12-15T01:35:49,008 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=130, ppid=129, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 172 msec 2024-12-15T01:35:49,009 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734226549009"}]},"ts":"1734226549009"} 2024-12-15T01:35:49,010 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=DISABLED in hbase:meta 2024-12-15T01:35:49,027 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithMergeRegion to state=DISABLED 2024-12-15T01:35:49,029 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=129, state=SUCCESS; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 230 msec 2024-12-15T01:35:49,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=129 2024-12-15T01:35:49,103 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion, procId: 129 completed 2024-12-15T01:35:49,104 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithMergeRegion 2024-12-15T01:35:49,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] procedure2.ProcedureExecutor(1098): Stored pid=135, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-15T01:35:49,106 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=135, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-15T01:35:49,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportFileSystemStateWithMergeRegion 2024-12-15T01:35:49,109 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35281 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithMergeRegion 2024-12-15T01:35:49,110 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=135, 
state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-15T01:35:49,113 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion/7776d3792f58052a078812d95f884c33 2024-12-15T01:35:49,113 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion/9b718fb94e54ecba6d4eaf293307b363 2024-12-15T01:35:49,115 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion/9b718fb94e54ecba6d4eaf293307b363/cf, FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion/9b718fb94e54ecba6d4eaf293307b363/recovered.edits] 2024-12-15T01:35:49,115 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion/7776d3792f58052a078812d95f884c33/cf, FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion/7776d3792f58052a078812d95f884c33/recovered.edits] 2024-12-15T01:35:49,119 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion/9b718fb94e54ecba6d4eaf293307b363/cf/e1fca6fda98e40f58bfe399a639e55e8 to hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/9b718fb94e54ecba6d4eaf293307b363/cf/e1fca6fda98e40f58bfe399a639e55e8 2024-12-15T01:35:49,120 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion/7776d3792f58052a078812d95f884c33/cf/8a12fe24ec2a4ec88d2b5831f19c8271 to hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/7776d3792f58052a078812d95f884c33/cf/8a12fe24ec2a4ec88d2b5831f19c8271 2024-12-15T01:35:49,123 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion/7776d3792f58052a078812d95f884c33/recovered.edits/9.seqid to hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/7776d3792f58052a078812d95f884c33/recovered.edits/9.seqid 2024-12-15T01:35:49,122 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion/9b718fb94e54ecba6d4eaf293307b363/recovered.edits/9.seqid to hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/9b718fb94e54ecba6d4eaf293307b363/recovered.edits/9.seqid 2024-12-15T01:35:49,123 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion/7776d3792f58052a078812d95f884c33 2024-12-15T01:35:49,123 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithMergeRegion/9b718fb94e54ecba6d4eaf293307b363 2024-12-15T01:35:49,124 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithMergeRegion regions 2024-12-15T01:35:49,126 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=135, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-15T01:35:49,129 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42663-0x1002752410c0001, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-15T01:35:49,129 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithMergeRegion from hbase:meta 2024-12-15T01:35:49,129 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39381-0x1002752410c0003, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-15T01:35:49,129 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35281-0x1002752410c0002, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-15T01:35:49,129 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-15T01:35:49,130 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-15T01:35:49,130 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-15T01:35:49,130 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-15T01:35:49,132 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportFileSystemStateWithMergeRegion' descriptor. 
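The HFileArchiver lines above do not delete the region's store files outright; they move them into a parallel archive/data/... tree and only then remove the now-empty region directory. A rough sketch of that move-then-delete pattern with the plain Hadoop FileSystem API is below; it is a simplified illustration and not the HFileArchiver implementation, and the paths, class name, and lack of collision handling and retries are assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ArchiveRegionDir {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();  // fs.defaultFS comes from core-site.xml
    FileSystem fs = FileSystem.get(conf);

    // Placeholder paths mirroring the layout in the log: a data/... region dir and its archive/... twin.
    Path regionDir  = new Path("/hbase/data/default/some-table/some-region");
    Path archiveDir = new Path("/hbase/archive/data/default/some-table/some-region");

    for (FileStatus family : fs.listStatus(regionDir)) {            // e.g. cf/ and recovered.edits/
      if (!family.isDirectory()) {
        continue;                                                    // skip loose files for this sketch
      }
      Path target = new Path(archiveDir, family.getPath().getName());
      fs.mkdirs(target);
      for (FileStatus file : fs.listStatus(family.getPath())) {
        // On HDFS a rename is a metadata-only move, so "archiving" a store file is cheap.
        fs.rename(file.getPath(), new Path(target, file.getPath().getName()));
      }
    }
    fs.delete(regionDir, true);                                     // remove the now-empty region directory
    fs.close();
  }
}

Keeping the HFiles of dropped tables under archive/ is what allows existing snapshots (and the later cleaner chores) to keep referencing them; the real archiver also handles name collisions and partial failures, which the sketch omits.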
2024-12-15T01:35:49,137 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=135, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-15T01:35:49,137 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportFileSystemStateWithMergeRegion' from region states. 2024-12-15T01:35:49,137 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39381-0x1002752410c0003, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-15T01:35:49,137 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39381-0x1002752410c0003, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:35:49,137 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42663-0x1002752410c0001, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-15T01:35:49,137 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion,,1734226530508.7776d3792f58052a078812d95f884c33.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734226549137"}]},"ts":"9223372036854775807"} 2024-12-15T01:35:49,137 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42663-0x1002752410c0001, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:35:49,138 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion,1,1734226530508.9b718fb94e54ecba6d4eaf293307b363.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734226549137"}]},"ts":"9223372036854775807"} 2024-12-15T01:35:49,138 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35281-0x1002752410c0002, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-15T01:35:49,138 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35281-0x1002752410c0002, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:35:49,138 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-15T01:35:49,138 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-15T01:35:49,138 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:35:49,139 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=135 2024-12-15T01:35:49,141 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-15T01:35:49,141 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 7776d3792f58052a078812d95f884c33, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1734226530508.7776d3792f58052a078812d95f884c33.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 9b718fb94e54ecba6d4eaf293307b363, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1734226530508.9b718fb94e54ecba6d4eaf293307b363.', STARTKEY => '1', ENDKEY => ''}] 2024-12-15T01:35:49,141 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportFileSystemStateWithMergeRegion' as deleted. 2024-12-15T01:35:49,141 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734226549141"}]},"ts":"9223372036854775807"} 2024-12-15T01:35:49,143 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportFileSystemStateWithMergeRegion state from META 2024-12-15T01:35:49,154 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=135, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-15T01:35:49,156 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=135, state=SUCCESS; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 50 msec 2024-12-15T01:35:49,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=135 2024-12-15T01:35:49,241 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion, procId: 135 completed 2024-12-15T01:35:49,249 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithMergeRegion" 2024-12-15T01:35:49,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-15T01:35:49,252 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithMergeRegion" 2024-12-15T01:35:49,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-15T01:35:49,255 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithMergeRegion-1" 2024-12-15T01:35:49,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-15T01:35:49,277 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithMergeRegion Thread=806 (was 800) Potentially hanging thread: DataXceiver for 
client DFSClient_NONMAPREDUCE_-778768832_22 at /127.0.0.1:38624 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/66d575e28c61:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33309 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x613351a8-shared-pool-30 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1105113522_1 at /127.0.0.1:51210 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (309020234) connection to localhost/127.0.0.1:33309 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ApplicationMasterLauncher #10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 66853) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x613351a8-shared-pool-29 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-778768832_22 at /127.0.0.1:38004 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x613351a8-shared-pool-32 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-778768832_22 at /127.0.0.1:51240 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x613351a8-shared-pool-31 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-4728 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-16 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x613351a8-shared-pool-34 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-15 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/66d575e28c61:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/66d575e28c61:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x613351a8-shared-pool-33 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=810 (was 814), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=570 (was 573), ProcessCount=17 (was 17), AvailableMemoryMB=3409 (was 3585) 2024-12-15T01:35:49,277 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=806 is superior to 500 2024-12-15T01:35:49,294 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportExpiredSnapshot Thread=806, OpenFileDescriptor=810, MaxFileDescriptor=1048576, SystemLoadAverage=570, ProcessCount=17, AvailableMemoryMB=3409 2024-12-15T01:35:49,294 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=806 is superior to 500 2024-12-15T01:35:49,296 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-15T01:35:49,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] procedure2.ProcedureExecutor(1098): Stored pid=136, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-15T01:35:49,298 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=136, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_PRE_OPERATION 2024-12-15T01:35:49,298 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T01:35:49,298 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] 
master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportExpiredSnapshot" procId is: 136 2024-12-15T01:35:49,299 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=136, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-15T01:35:49,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-15T01:35:49,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742164_1340 (size=407) 2024-12-15T01:35:49,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742164_1340 (size=407) 2024-12-15T01:35:49,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742164_1340 (size=407) 2024-12-15T01:35:49,307 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => be1c2fa34021a22845f906c49f0630b3, NAME => 'testtb-testExportExpiredSnapshot,1,1734226549295.be1c2fa34021a22845f906c49f0630b3.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b 2024-12-15T01:35:49,307 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 670f0adb452d72ff28029f7d197d3878, NAME => 'testtb-testExportExpiredSnapshot,,1734226549295.670f0adb452d72ff28029f7d197d3878.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b 2024-12-15T01:35:49,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742166_1342 (size=68) 2024-12-15T01:35:49,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742166_1342 (size=68) 2024-12-15T01:35:49,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742166_1342 (size=68) 2024-12-15T01:35:49,316 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(894): Instantiated 
testtb-testExportExpiredSnapshot,1,1734226549295.be1c2fa34021a22845f906c49f0630b3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T01:35:49,316 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1681): Closing be1c2fa34021a22845f906c49f0630b3, disabling compactions & flushes 2024-12-15T01:35:49,316 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportExpiredSnapshot,1,1734226549295.be1c2fa34021a22845f906c49f0630b3. 2024-12-15T01:35:49,316 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportExpiredSnapshot,1,1734226549295.be1c2fa34021a22845f906c49f0630b3. 2024-12-15T01:35:49,316 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportExpiredSnapshot,1,1734226549295.be1c2fa34021a22845f906c49f0630b3. after waiting 0 ms 2024-12-15T01:35:49,316 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportExpiredSnapshot,1,1734226549295.be1c2fa34021a22845f906c49f0630b3. 2024-12-15T01:35:49,316 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportExpiredSnapshot,1,1734226549295.be1c2fa34021a22845f906c49f0630b3. 2024-12-15T01:35:49,317 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1635): Region close journal for be1c2fa34021a22845f906c49f0630b3: 2024-12-15T01:35:49,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742165_1341 (size=68) 2024-12-15T01:35:49,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742165_1341 (size=68) 2024-12-15T01:35:49,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742165_1341 (size=68) 2024-12-15T01:35:49,322 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportExpiredSnapshot,,1734226549295.670f0adb452d72ff28029f7d197d3878.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T01:35:49,322 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1681): Closing 670f0adb452d72ff28029f7d197d3878, disabling compactions & flushes 2024-12-15T01:35:49,322 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportExpiredSnapshot,,1734226549295.670f0adb452d72ff28029f7d197d3878. 2024-12-15T01:35:49,322 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportExpiredSnapshot,,1734226549295.670f0adb452d72ff28029f7d197d3878. 2024-12-15T01:35:49,322 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportExpiredSnapshot,,1734226549295.670f0adb452d72ff28029f7d197d3878. 
after waiting 0 ms 2024-12-15T01:35:49,322 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportExpiredSnapshot,,1734226549295.670f0adb452d72ff28029f7d197d3878. 2024-12-15T01:35:49,322 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportExpiredSnapshot,,1734226549295.670f0adb452d72ff28029f7d197d3878. 2024-12-15T01:35:49,322 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1635): Region close journal for 670f0adb452d72ff28029f7d197d3878: 2024-12-15T01:35:49,323 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=136, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_ADD_TO_META 2024-12-15T01:35:49,323 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportExpiredSnapshot,1,1734226549295.be1c2fa34021a22845f906c49f0630b3.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1734226549323"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734226549323"}]},"ts":"1734226549323"} 2024-12-15T01:35:49,323 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportExpiredSnapshot,,1734226549295.670f0adb452d72ff28029f7d197d3878.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1734226549323"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734226549323"}]},"ts":"1734226549323"} 2024-12-15T01:35:49,325 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-15T01:35:49,326 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=136, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-15T01:35:49,326 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734226549326"}]},"ts":"1734226549326"} 2024-12-15T01:35:49,327 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportExpiredSnapshot, state=ENABLING in hbase:meta 2024-12-15T01:35:49,345 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(202): Hosts are {66d575e28c61=0} racks are {/default-rack=0} 2024-12-15T01:35:49,347 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-15T01:35:49,347 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-15T01:35:49,347 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-15T01:35:49,347 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-15T01:35:49,347 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-15T01:35:49,347 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-15T01:35:49,347 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-15T01:35:49,347 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=137, ppid=136, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, 
region=670f0adb452d72ff28029f7d197d3878, ASSIGN}, {pid=138, ppid=136, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=be1c2fa34021a22845f906c49f0630b3, ASSIGN}] 2024-12-15T01:35:49,348 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=138, ppid=136, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=be1c2fa34021a22845f906c49f0630b3, ASSIGN 2024-12-15T01:35:49,348 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=137, ppid=136, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=670f0adb452d72ff28029f7d197d3878, ASSIGN 2024-12-15T01:35:49,349 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=138, ppid=136, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=be1c2fa34021a22845f906c49f0630b3, ASSIGN; state=OFFLINE, location=66d575e28c61,39381,1734226422872; forceNewPlan=false, retain=false 2024-12-15T01:35:49,349 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=137, ppid=136, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=670f0adb452d72ff28029f7d197d3878, ASSIGN; state=OFFLINE, location=66d575e28c61,35281,1734226422799; forceNewPlan=false, retain=false 2024-12-15T01:35:49,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-15T01:35:49,499 INFO [66d575e28c61:40789 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 
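The entries above trace CreateTableProcedure pid=136 for 'testtb-testExportExpiredSnapshot' from CREATE_TABLE_PRE_OPERATION through CREATE_TABLE_ASSIGN_REGIONS: a single 'cf' family, one split point at '1', and two resulting regions. The test source itself is not shown in this log; the following is only an illustrative sketch, assuming the HBase 2.x Java client API, of the kind of Admin call that produces such a create request (class and variable names are placeholders).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("testtb-testExportExpiredSnapshot");
          // Single column family 'cf', one version, 64 KB blocks -- mirroring the
          // attributes printed in the master's create request above.
          TableDescriptor desc = TableDescriptorBuilder.newBuilder(tn)
              .setRegionReplication(1)
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                  .setMaxVersions(1)
                  .setBlocksize(64 * 1024)
                  .build())
              .build();
          // One split key ("1") yields the two regions seen in the log: [,1) and [1,).
          byte[][] splitKeys = { Bytes.toBytes("1") };
          admin.createTable(desc, splitKeys);
        }
      }
    }

The client call returns once the CreateTableProcedure finishes; the repeated "Checking to see if procedure is done pid=136" lines are the client polling for exactly that.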
2024-12-15T01:35:49,500 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=137 updating hbase:meta row=670f0adb452d72ff28029f7d197d3878, regionState=OPENING, regionLocation=66d575e28c61,35281,1734226422799 2024-12-15T01:35:49,500 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=138 updating hbase:meta row=be1c2fa34021a22845f906c49f0630b3, regionState=OPENING, regionLocation=66d575e28c61,39381,1734226422872 2024-12-15T01:35:49,503 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=139, ppid=137, state=RUNNABLE; OpenRegionProcedure 670f0adb452d72ff28029f7d197d3878, server=66d575e28c61,35281,1734226422799}] 2024-12-15T01:35:49,504 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=140, ppid=138, state=RUNNABLE; OpenRegionProcedure be1c2fa34021a22845f906c49f0630b3, server=66d575e28c61,39381,1734226422872}] 2024-12-15T01:35:49,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-15T01:35:49,656 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 66d575e28c61,35281,1734226422799 2024-12-15T01:35:49,658 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 66d575e28c61,39381,1734226422872 2024-12-15T01:35:49,660 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] handler.AssignRegionHandler(135): Open testtb-testExportExpiredSnapshot,,1734226549295.670f0adb452d72ff28029f7d197d3878. 2024-12-15T01:35:49,660 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(7285): Opening region: {ENCODED => 670f0adb452d72ff28029f7d197d3878, NAME => 'testtb-testExportExpiredSnapshot,,1734226549295.670f0adb452d72ff28029f7d197d3878.', STARTKEY => '', ENDKEY => '1'} 2024-12-15T01:35:49,660 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportExpiredSnapshot,,1734226549295.670f0adb452d72ff28029f7d197d3878. service=AccessControlService 2024-12-15T01:35:49,660 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
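Above, the master marks both regions OPENING in hbase:meta and dispatches OpenRegionProcedure 139/140 to the two region servers. From the client side the resulting placement can be inspected through a RegionLocator; the sketch below is illustrative only (HBase 2.x client assumed, class name is a placeholder), not part of the test.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class ShowAssignments {
      public static void main(String[] args) throws Exception {
        TableName tn = TableName.valueOf("testtb-testExportExpiredSnapshot");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             RegionLocator locator = conn.getRegionLocator(tn)) {
          // One line per region: encoded name plus hosting server, matching the
          // regionLocation values written to hbase:meta above.
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
          }
        }
      }
    }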
2024-12-15T01:35:49,661 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportExpiredSnapshot 670f0adb452d72ff28029f7d197d3878 2024-12-15T01:35:49,661 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(894): Instantiated testtb-testExportExpiredSnapshot,,1734226549295.670f0adb452d72ff28029f7d197d3878.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T01:35:49,661 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(7327): checking encryption for 670f0adb452d72ff28029f7d197d3878 2024-12-15T01:35:49,661 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(7330): checking classloading for 670f0adb452d72ff28029f7d197d3878 2024-12-15T01:35:49,661 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] handler.AssignRegionHandler(135): Open testtb-testExportExpiredSnapshot,1,1734226549295.be1c2fa34021a22845f906c49f0630b3. 2024-12-15T01:35:49,661 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(7285): Opening region: {ENCODED => be1c2fa34021a22845f906c49f0630b3, NAME => 'testtb-testExportExpiredSnapshot,1,1734226549295.be1c2fa34021a22845f906c49f0630b3.', STARTKEY => '1', ENDKEY => ''} 2024-12-15T01:35:49,662 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportExpiredSnapshot,1,1734226549295.be1c2fa34021a22845f906c49f0630b3. service=AccessControlService 2024-12-15T01:35:49,662 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
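The "Registered coprocessor service ... service=AccessControlService" entries appear because this secure-export test cluster runs with the AccessController coprocessor enabled. That is done through configuration rather than code; the snippet below is a hedged reminder of the standard security properties involved (expressed as Java Configuration calls for brevity), not an excerpt of this test's actual setup.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class SecureClusterConfSketch {
      public static Configuration secureConf() {
        Configuration conf = HBaseConfiguration.create();
        // Enable HBase authorization and load the AccessController coprocessor on
        // master, region servers and regions; this is what makes AccessControlService
        // register each time a region opens, as logged above.
        conf.setBoolean("hbase.security.authorization", true);
        conf.set("hbase.coprocessor.master.classes",
            "org.apache.hadoop.hbase.security.access.AccessController");
        conf.set("hbase.coprocessor.region.classes",
            "org.apache.hadoop.hbase.security.access.AccessController");
        conf.set("hbase.coprocessor.regionserver.classes",
            "org.apache.hadoop.hbase.security.access.AccessController");
        return conf;
      }
    }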
2024-12-15T01:35:49,662 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportExpiredSnapshot be1c2fa34021a22845f906c49f0630b3 2024-12-15T01:35:49,662 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(894): Instantiated testtb-testExportExpiredSnapshot,1,1734226549295.be1c2fa34021a22845f906c49f0630b3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T01:35:49,662 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(7327): checking encryption for be1c2fa34021a22845f906c49f0630b3 2024-12-15T01:35:49,662 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(7330): checking classloading for be1c2fa34021a22845f906c49f0630b3 2024-12-15T01:35:49,663 INFO [StoreOpener-670f0adb452d72ff28029f7d197d3878-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 670f0adb452d72ff28029f7d197d3878 2024-12-15T01:35:49,664 INFO [StoreOpener-be1c2fa34021a22845f906c49f0630b3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region be1c2fa34021a22845f906c49f0630b3 2024-12-15T01:35:49,665 INFO [StoreOpener-670f0adb452d72ff28029f7d197d3878-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 670f0adb452d72ff28029f7d197d3878 columnFamilyName cf 2024-12-15T01:35:49,665 DEBUG [StoreOpener-670f0adb452d72ff28029f7d197d3878-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T01:35:49,666 INFO [StoreOpener-670f0adb452d72ff28029f7d197d3878-1 {}] regionserver.HStore(327): Store=670f0adb452d72ff28029f7d197d3878/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-15T01:35:49,666 INFO [StoreOpener-be1c2fa34021a22845f906c49f0630b3-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, 
incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region be1c2fa34021a22845f906c49f0630b3 columnFamilyName cf 2024-12-15T01:35:49,666 DEBUG [StoreOpener-be1c2fa34021a22845f906c49f0630b3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T01:35:49,667 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportExpiredSnapshot/670f0adb452d72ff28029f7d197d3878 2024-12-15T01:35:49,667 INFO [StoreOpener-be1c2fa34021a22845f906c49f0630b3-1 {}] regionserver.HStore(327): Store=be1c2fa34021a22845f906c49f0630b3/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-15T01:35:49,667 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportExpiredSnapshot/670f0adb452d72ff28029f7d197d3878 2024-12-15T01:35:49,668 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportExpiredSnapshot/be1c2fa34021a22845f906c49f0630b3 2024-12-15T01:35:49,668 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportExpiredSnapshot/be1c2fa34021a22845f906c49f0630b3 2024-12-15T01:35:49,670 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(1085): writing seq id for 670f0adb452d72ff28029f7d197d3878 2024-12-15T01:35:49,671 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(1085): writing seq id for be1c2fa34021a22845f906c49f0630b3 2024-12-15T01:35:49,672 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportExpiredSnapshot/670f0adb452d72ff28029f7d197d3878/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-15T01:35:49,672 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(1102): Opened 670f0adb452d72ff28029f7d197d3878; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68764398, jitterRate=0.024669378995895386}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-15T01:35:49,673 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] 
wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportExpiredSnapshot/be1c2fa34021a22845f906c49f0630b3/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-15T01:35:49,673 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(1001): Region open journal for 670f0adb452d72ff28029f7d197d3878: 2024-12-15T01:35:49,673 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(1102): Opened be1c2fa34021a22845f906c49f0630b3; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62133905, jitterRate=-0.07413266599178314}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-15T01:35:49,673 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(1001): Region open journal for be1c2fa34021a22845f906c49f0630b3: 2024-12-15T01:35:49,674 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportExpiredSnapshot,1,1734226549295.be1c2fa34021a22845f906c49f0630b3., pid=140, masterSystemTime=1734226549658 2024-12-15T01:35:49,674 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportExpiredSnapshot,,1734226549295.670f0adb452d72ff28029f7d197d3878., pid=139, masterSystemTime=1734226549655 2024-12-15T01:35:49,675 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportExpiredSnapshot,1,1734226549295.be1c2fa34021a22845f906c49f0630b3. 2024-12-15T01:35:49,675 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] handler.AssignRegionHandler(164): Opened testtb-testExportExpiredSnapshot,1,1734226549295.be1c2fa34021a22845f906c49f0630b3. 2024-12-15T01:35:49,675 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=138 updating hbase:meta row=be1c2fa34021a22845f906c49f0630b3, regionState=OPEN, openSeqNum=2, regionLocation=66d575e28c61,39381,1734226422872 2024-12-15T01:35:49,675 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportExpiredSnapshot,,1734226549295.670f0adb452d72ff28029f7d197d3878. 2024-12-15T01:35:49,676 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] handler.AssignRegionHandler(164): Opened testtb-testExportExpiredSnapshot,,1734226549295.670f0adb452d72ff28029f7d197d3878. 
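The CompactionConfiguration and SteppingSplitPolicy lines above merely echo tuning values taken from configuration; the figures in this run (minFilesToCompact 3, maxFilesToCompact 10, ratio 1.2, off-peak ratio 5.0, major period 604800000 ms, jitter 0.5) appear to be the shipped defaults. As a hedged reference, these are the usual keys behind those numbers; the values shown are the ones printed above, not a recommendation.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuningSketch {
      public static Configuration tuned() {
        Configuration conf = HBaseConfiguration.create();
        // Minor-compaction file selection, as echoed by CompactionConfiguration(181).
        conf.setInt("hbase.hstore.compaction.min", 3);      // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);     // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
        // Major compaction period and jitter (604800000 ms = 7 days, 0.5 jitter).
        conf.setLong("hbase.hregion.majorcompaction", 604800000L);
        conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);
        // The split policy named in the "Opened ..." lines above.
        conf.set("hbase.regionserver.region.split.policy",
            "org.apache.hadoop.hbase.regionserver.SteppingSplitPolicy");
        return conf;
      }
    }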
2024-12-15T01:35:49,676 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=137 updating hbase:meta row=670f0adb452d72ff28029f7d197d3878, regionState=OPEN, openSeqNum=2, regionLocation=66d575e28c61,35281,1734226422799 2024-12-15T01:35:49,678 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=140, resume processing ppid=138 2024-12-15T01:35:49,678 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=140, ppid=138, state=SUCCESS; OpenRegionProcedure be1c2fa34021a22845f906c49f0630b3, server=66d575e28c61,39381,1734226422872 in 173 msec 2024-12-15T01:35:49,678 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=139, resume processing ppid=137 2024-12-15T01:35:49,678 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=139, ppid=137, state=SUCCESS; OpenRegionProcedure 670f0adb452d72ff28029f7d197d3878, server=66d575e28c61,35281,1734226422799 in 174 msec 2024-12-15T01:35:49,679 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=138, ppid=136, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=be1c2fa34021a22845f906c49f0630b3, ASSIGN in 331 msec 2024-12-15T01:35:49,680 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=137, resume processing ppid=136 2024-12-15T01:35:49,680 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=137, ppid=136, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=670f0adb452d72ff28029f7d197d3878, ASSIGN in 331 msec 2024-12-15T01:35:49,680 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=136, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-15T01:35:49,681 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734226549680"}]},"ts":"1734226549680"} 2024-12-15T01:35:49,685 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportExpiredSnapshot, state=ENABLED in hbase:meta 2024-12-15T01:35:49,721 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=136, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_POST_OPERATION 2024-12-15T01:35:49,721 DEBUG [PEWorker-1 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportExpiredSnapshot jenkins: RWXCA 2024-12-15T01:35:49,723 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35281 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-15T01:35:49,729 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42663-0x1002752410c0001, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:35:49,729 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35281-0x1002752410c0002, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:35:49,729 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39381-0x1002752410c0003, quorum=127.0.0.1:64248, 
baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:35:49,729 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:35:49,737 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-15T01:35:49,737 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-15T01:35:49,737 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-15T01:35:49,737 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-15T01:35:49,738 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=136, state=SUCCESS; CreateTableProcedure table=testtb-testExportExpiredSnapshot in 441 msec 2024-12-15T01:35:49,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-15T01:35:49,905 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportExpiredSnapshot, procId: 136 completed 2024-12-15T01:35:49,905 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testExportExpiredSnapshot get assigned. Timeout = 60000ms 2024-12-15T01:35:49,905 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-15T01:35:49,913 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testExportExpiredSnapshot assigned to meta. Checking AM states. 2024-12-15T01:35:49,913 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-15T01:35:49,913 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testExportExpiredSnapshot assigned. 2024-12-15T01:35:49,916 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-15T01:35:49,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734226549916 (current time:1734226549916). 
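PermissionStorage(175) and the ZKPermissionWatcher entries above record the ACL "jenkins: RWXCA" being written to the hbase:acl table during CREATE_TABLE_POST_OPERATION and pushed to every watcher through ZooKeeper. In this run the grant happens automatically for the table owner; an explicit client-side grant of the same shape would look roughly like the sketch below (illustrative only, HBase 2.x AccessControlClient assumed; user and table names are copied from the log).

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.Permission;

    public class GrantSketch {
      public static void main(String[] args) throws Throwable {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
          // RWXCA == READ, WRITE, EXEC, CREATE, ADMIN; a null family/qualifier
          // makes the grant apply to the whole table.
          AccessControlClient.grant(conn,
              TableName.valueOf("testtb-testExportExpiredSnapshot"),
              "jenkins", null, null,
              Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
              Permission.Action.CREATE, Permission.Action.ADMIN);
        }
      }
    }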
2024-12-15T01:35:49,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-15T01:35:49,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-12-15T01:35:49,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-15T01:35:49,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3817e478 to 127.0.0.1:64248 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6c663c0c 2024-12-15T01:35:49,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3562fb04, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T01:35:49,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T01:35:49,930 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54154, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T01:35:49,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3817e478 to 127.0.0.1:64248 2024-12-15T01:35:49,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T01:35:49,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x78c0a4e7 to 127.0.0.1:64248 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@259b71af 2024-12-15T01:35:49,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@78304c3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T01:35:49,948 DEBUG [hconnection-0x6548bee5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T01:35:49,949 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54160, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T01:35:49,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T01:35:49,952 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46726, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T01:35:49,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper 
connection 0x78c0a4e7 to 127.0.0.1:64248 2024-12-15T01:35:49,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T01:35:49,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-15T01:35:49,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-15T01:35:49,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] procedure2.ProcedureExecutor(1098): Stored pid=141, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-15T01:35:49,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 141 2024-12-15T01:35:49,956 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-15T01:35:49,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=141 2024-12-15T01:35:49,957 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-15T01:35:49,960 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-15T01:35:49,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742167_1343 (size=170) 2024-12-15T01:35:49,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742167_1343 (size=170) 2024-12-15T01:35:49,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742167_1343 (size=170) 2024-12-15T01:35:49,969 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-15T01:35:49,969 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=142, ppid=141, state=RUNNABLE; SnapshotRegionProcedure 670f0adb452d72ff28029f7d197d3878}, {pid=143, ppid=141, state=RUNNABLE; 
SnapshotRegionProcedure be1c2fa34021a22845f906c49f0630b3}] 2024-12-15T01:35:49,970 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=143, ppid=141, state=RUNNABLE; SnapshotRegionProcedure be1c2fa34021a22845f906c49f0630b3 2024-12-15T01:35:49,970 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=142, ppid=141, state=RUNNABLE; SnapshotRegionProcedure 670f0adb452d72ff28029f7d197d3878 2024-12-15T01:35:50,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=141 2024-12-15T01:35:50,122 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 66d575e28c61,39381,1734226422872 2024-12-15T01:35:50,122 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 66d575e28c61,35281,1734226422799 2024-12-15T01:35:50,123 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35281 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=142 2024-12-15T01:35:50,124 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,,1734226549295.670f0adb452d72ff28029f7d197d3878. 2024-12-15T01:35:50,124 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39381 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=143 2024-12-15T01:35:50,124 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,1,1734226549295.be1c2fa34021a22845f906c49f0630b3. 2024-12-15T01:35:50,124 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegion(2538): Flush status journal for 670f0adb452d72ff28029f7d197d3878: 2024-12-15T01:35:50,124 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,,1734226549295.670f0adb452d72ff28029f7d197d3878. for emptySnaptb0-testExportExpiredSnapshot completed. 2024-12-15T01:35:50,125 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] regionserver.HRegion(2538): Flush status journal for be1c2fa34021a22845f906c49f0630b3: 2024-12-15T01:35:50,125 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,1,1734226549295.be1c2fa34021a22845f906c49f0630b3. for emptySnaptb0-testExportExpiredSnapshot completed. 2024-12-15T01:35:50,125 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,,1734226549295.670f0adb452d72ff28029f7d197d3878.' 
region-info for snapshot=emptySnaptb0-testExportExpiredSnapshot 2024-12-15T01:35:50,125 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-15T01:35:50,125 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-15T01:35:50,125 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,1,1734226549295.be1c2fa34021a22845f906c49f0630b3.' region-info for snapshot=emptySnaptb0-testExportExpiredSnapshot 2024-12-15T01:35:50,125 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-15T01:35:50,125 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-15T01:35:50,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742169_1345 (size=71) 2024-12-15T01:35:50,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742169_1345 (size=71) 2024-12-15T01:35:50,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742169_1345 (size=71) 2024-12-15T01:35:50,141 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,1,1734226549295.be1c2fa34021a22845f906c49f0630b3. 
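SnapshotProcedure pid=141 above walks SNAPSHOT_PREPARE through SNAPSHOT_SNAPSHOT_ONLINE_REGIONS for emptySnaptb0-testExportExpiredSnapshot; since nothing has been written to the table yet, each per-region manifest records zero hfiles. On the client, the request that starts this procedure is a single Admin call; a minimal sketch under the same HBase 2.x assumptions (not the test's own code):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class SnapshotSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // type=FLUSH matches the "ss=... type=FLUSH ttl=0" request logged by the
          // master; the call blocks until the SnapshotProcedure reports completion.
          admin.snapshot("emptySnaptb0-testExportExpiredSnapshot",
              TableName.valueOf("testtb-testExportExpiredSnapshot"),
              SnapshotType.FLUSH);
        }
      }
    }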
2024-12-15T01:35:50,141 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=143 2024-12-15T01:35:50,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.HMaster(4106): Remote procedure done, pid=143 2024-12-15T01:35:50,141 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportExpiredSnapshot on region be1c2fa34021a22845f906c49f0630b3 2024-12-15T01:35:50,141 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=143, ppid=141, state=RUNNABLE; SnapshotRegionProcedure be1c2fa34021a22845f906c49f0630b3 2024-12-15T01:35:50,143 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=143, ppid=141, state=SUCCESS; SnapshotRegionProcedure be1c2fa34021a22845f906c49f0630b3 in 173 msec 2024-12-15T01:35:50,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742168_1344 (size=71) 2024-12-15T01:35:50,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742168_1344 (size=71) 2024-12-15T01:35:50,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742168_1344 (size=71) 2024-12-15T01:35:50,149 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,,1734226549295.670f0adb452d72ff28029f7d197d3878. 
2024-12-15T01:35:50,149 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=142 2024-12-15T01:35:50,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.HMaster(4106): Remote procedure done, pid=142 2024-12-15T01:35:50,149 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportExpiredSnapshot on region 670f0adb452d72ff28029f7d197d3878 2024-12-15T01:35:50,149 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=142, ppid=141, state=RUNNABLE; SnapshotRegionProcedure 670f0adb452d72ff28029f7d197d3878 2024-12-15T01:35:50,151 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=142, resume processing ppid=141 2024-12-15T01:35:50,151 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=142, ppid=141, state=SUCCESS; SnapshotRegionProcedure 670f0adb452d72ff28029f7d197d3878 in 181 msec 2024-12-15T01:35:50,151 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-15T01:35:50,152 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-15T01:35:50,152 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-15T01:35:50,152 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportExpiredSnapshot 2024-12-15T01:35:50,153 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/.tmp/emptySnaptb0-testExportExpiredSnapshot 2024-12-15T01:35:50,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742170_1346 (size=552) 2024-12-15T01:35:50,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742170_1346 (size=552) 2024-12-15T01:35:50,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742170_1346 (size=552) 2024-12-15T01:35:50,162 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-15T01:35:50,167 INFO 
[PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-15T01:35:50,167 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/.tmp/emptySnaptb0-testExportExpiredSnapshot to hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/emptySnaptb0-testExportExpiredSnapshot 2024-12-15T01:35:50,168 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-15T01:35:50,168 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 141 2024-12-15T01:35:50,169 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=141, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } in 214 msec 2024-12-15T01:35:50,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=141 2024-12-15T01:35:50,261 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportExpiredSnapshot, procId: 141 completed 2024-12-15T01:35:50,272 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35281 {}] regionserver.HRegion(8254): writing data to region testtb-testExportExpiredSnapshot,,1734226549295.670f0adb452d72ff28029f7d197d3878. with WAL disabled. Data may be lost in the event of a crash. 2024-12-15T01:35:50,274 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] regionserver.HRegion(8254): writing data to region testtb-testExportExpiredSnapshot,1,1734226549295.be1c2fa34021a22845f906c49f0630b3. with WAL disabled. Data may be lost in the event of a crash. 2024-12-15T01:35:50,277 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportExpiredSnapshot 2024-12-15T01:35:50,277 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportExpiredSnapshot,,1734226549295.670f0adb452d72ff28029f7d197d3878. 2024-12-15T01:35:50,277 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-15T01:35:50,288 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-15T01:35:50,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734226550288 (current time:1734226550288). 
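Once pid=141 completes, the test writes a handful of rows into both regions and HRegion(8254) warns that the puts arrive "with WAL disabled" -- a deliberate test-speed trade-off, since such edits would be lost on a crash -- before requesting the second snapshot, snaptb0-testExportExpiredSnapshot. A put with that durability setting is sketched below (illustrative only; the row key, qualifier, and value are placeholders).

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SkipWalPutSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("testtb-testExportExpiredSnapshot"))) {
          Put put = new Put(Bytes.toBytes("row-1"));
          put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
          // SKIP_WAL is what triggers the "with WAL disabled" warning above;
          // the edit lives only in the memstore until it is flushed.
          put.setDurability(Durability.SKIP_WAL);
          table.put(put);
        }
      }
    }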
2024-12-15T01:35:50,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-15T01:35:50,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-12-15T01:35:50,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-15T01:35:50,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x79007083 to 127.0.0.1:64248 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@591f46d0 2024-12-15T01:35:50,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@570a37a5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T01:35:50,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T01:35:50,335 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54166, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T01:35:50,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x79007083 to 127.0.0.1:64248 2024-12-15T01:35:50,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T01:35:50,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1d52464c to 127.0.0.1:64248 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3ed8baf0 2024-12-15T01:35:50,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@38691591, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T01:35:50,356 DEBUG [hconnection-0x3a75ba68-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T01:35:50,357 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54172, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T01:35:50,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T01:35:50,359 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46732, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T01:35:50,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper 
connection 0x1d52464c to 127.0.0.1:64248 2024-12-15T01:35:50,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T01:35:50,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-15T01:35:50,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-15T01:35:50,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] procedure2.ProcedureExecutor(1098): Stored pid=144, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-15T01:35:50,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 144 2024-12-15T01:35:50,362 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-15T01:35:50,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-15T01:35:50,362 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-15T01:35:50,364 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-15T01:35:50,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742171_1347 (size=165) 2024-12-15T01:35:50,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742171_1347 (size=165) 2024-12-15T01:35:50,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742171_1347 (size=165) 2024-12-15T01:35:50,369 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-15T01:35:50,369 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=145, ppid=144, state=RUNNABLE; SnapshotRegionProcedure 670f0adb452d72ff28029f7d197d3878}, {pid=146, ppid=144, state=RUNNABLE; SnapshotRegionProcedure 
be1c2fa34021a22845f906c49f0630b3}] 2024-12-15T01:35:50,370 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=145, ppid=144, state=RUNNABLE; SnapshotRegionProcedure 670f0adb452d72ff28029f7d197d3878 2024-12-15T01:35:50,370 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=146, ppid=144, state=RUNNABLE; SnapshotRegionProcedure be1c2fa34021a22845f906c49f0630b3 2024-12-15T01:35:50,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-15T01:35:50,521 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 66d575e28c61,35281,1734226422799 2024-12-15T01:35:50,521 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 66d575e28c61,39381,1734226422872 2024-12-15T01:35:50,522 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39381 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=146 2024-12-15T01:35:50,522 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35281 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=145 2024-12-15T01:35:50,523 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,,1734226549295.670f0adb452d72ff28029f7d197d3878. 2024-12-15T01:35:50,523 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,1,1734226549295.be1c2fa34021a22845f906c49f0630b3. 
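The entries above show the master turning the snapshot request for snaptb0-testExportExpiredSnapshot into a SnapshotProcedure (pid=144) with one SnapshotRegionProcedure child per region (pids 145 and 146), dispatched to the two region servers. For reference, a client-side request that drives this path can be issued with the public HBase Admin API roughly as in the sketch below. This is illustrative only, not the test's own code; Admin.snapshot(name, table) defaults to a FLUSH-type snapshot, which matches the type=FLUSH shown in the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class SnapshotRequestSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // A flush snapshot request; the master executes it as the SnapshotProcedure /
      // SnapshotRegionProcedure chain (pid=144/145/146) visible in the log above.
      admin.snapshot("snaptb0-testExportExpiredSnapshot",
          TableName.valueOf("testtb-testExportExpiredSnapshot"));
    }
  }
}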
2024-12-15T01:35:50,523 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.HRegion(2837): Flushing 670f0adb452d72ff28029f7d197d3878 1/1 column families, dataSize=266 B heapSize=832 B 2024-12-15T01:35:50,524 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.HRegion(2837): Flushing be1c2fa34021a22845f906c49f0630b3 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-12-15T01:35:50,542 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportExpiredSnapshot/be1c2fa34021a22845f906c49f0630b3/.tmp/cf/16ff105b593242caa76d039ceb8b04a3 is 71, key is 1c798092c37526770cc41441529627e9/cf:q/1734226550274/Put/seqid=0 2024-12-15T01:35:50,542 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportExpiredSnapshot/670f0adb452d72ff28029f7d197d3878/.tmp/cf/70347d0959434dce86c4ac7d910edb77 is 71, key is 08156dc92363026c4efbc1dacf016894/cf:q/1734226550271/Put/seqid=0 2024-12-15T01:35:50,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742172_1348 (size=8256) 2024-12-15T01:35:50,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742172_1348 (size=8256) 2024-12-15T01:35:50,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742173_1349 (size=5356) 2024-12-15T01:35:50,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742172_1348 (size=8256) 2024-12-15T01:35:50,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742173_1349 (size=5356) 2024-12-15T01:35:50,547 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.00 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportExpiredSnapshot/be1c2fa34021a22845f906c49f0630b3/.tmp/cf/16ff105b593242caa76d039ceb8b04a3 2024-12-15T01:35:50,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742173_1349 (size=5356) 2024-12-15T01:35:50,547 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=266 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportExpiredSnapshot/670f0adb452d72ff28029f7d197d3878/.tmp/cf/70347d0959434dce86c4ac7d910edb77 2024-12-15T01:35:50,551 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportExpiredSnapshot/be1c2fa34021a22845f906c49f0630b3/.tmp/cf/16ff105b593242caa76d039ceb8b04a3 as hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportExpiredSnapshot/be1c2fa34021a22845f906c49f0630b3/cf/16ff105b593242caa76d039ceb8b04a3 2024-12-15T01:35:50,551 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportExpiredSnapshot/670f0adb452d72ff28029f7d197d3878/.tmp/cf/70347d0959434dce86c4ac7d910edb77 as hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportExpiredSnapshot/670f0adb452d72ff28029f7d197d3878/cf/70347d0959434dce86c4ac7d910edb77 2024-12-15T01:35:50,555 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportExpiredSnapshot/be1c2fa34021a22845f906c49f0630b3/cf/16ff105b593242caa76d039ceb8b04a3, entries=46, sequenceid=6, filesize=8.1 K 2024-12-15T01:35:50,556 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.HRegion(3040): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for be1c2fa34021a22845f906c49f0630b3 in 33ms, sequenceid=6, compaction requested=false 2024-12-15T01:35:50,556 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportExpiredSnapshot/670f0adb452d72ff28029f7d197d3878/cf/70347d0959434dce86c4ac7d910edb77, entries=4, sequenceid=6, filesize=5.2 K 2024-12-15T01:35:50,556 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportExpiredSnapshot' 2024-12-15T01:35:50,556 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.HRegion(2538): Flush status journal for be1c2fa34021a22845f906c49f0630b3: 2024-12-15T01:35:50,556 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,1,1734226549295.be1c2fa34021a22845f906c49f0630b3. for snaptb0-testExportExpiredSnapshot completed. 2024-12-15T01:35:50,556 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,1,1734226549295.be1c2fa34021a22845f906c49f0630b3.' 
region-info for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-15T01:35:50,556 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.HRegion(3040): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for 670f0adb452d72ff28029f7d197d3878 in 33ms, sequenceid=6, compaction requested=false 2024-12-15T01:35:50,556 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-15T01:35:50,556 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportExpiredSnapshot/be1c2fa34021a22845f906c49f0630b3/cf/16ff105b593242caa76d039ceb8b04a3] hfiles 2024-12-15T01:35:50,556 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.HRegion(2538): Flush status journal for 670f0adb452d72ff28029f7d197d3878: 2024-12-15T01:35:50,556 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportExpiredSnapshot/be1c2fa34021a22845f906c49f0630b3/cf/16ff105b593242caa76d039ceb8b04a3 for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-15T01:35:50,556 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,,1734226549295.670f0adb452d72ff28029f7d197d3878. for snaptb0-testExportExpiredSnapshot completed. 2024-12-15T01:35:50,557 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,,1734226549295.670f0adb452d72ff28029f7d197d3878.' 
region-info for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-15T01:35:50,557 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-15T01:35:50,557 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportExpiredSnapshot/670f0adb452d72ff28029f7d197d3878/cf/70347d0959434dce86c4ac7d910edb77] hfiles 2024-12-15T01:35:50,557 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportExpiredSnapshot/670f0adb452d72ff28029f7d197d3878/cf/70347d0959434dce86c4ac7d910edb77 for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-15T01:35:50,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742174_1350 (size=110) 2024-12-15T01:35:50,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742175_1351 (size=110) 2024-12-15T01:35:50,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742174_1350 (size=110) 2024-12-15T01:35:50,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742175_1351 (size=110) 2024-12-15T01:35:50,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742174_1350 (size=110) 2024-12-15T01:35:50,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742175_1351 (size=110) 2024-12-15T01:35:50,564 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,,1734226549295.670f0adb452d72ff28029f7d197d3878. 2024-12-15T01:35:50,564 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,1,1734226549295.be1c2fa34021a22845f906c49f0630b3. 
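At this point each region has flushed its memstore and added its hfile references to the snapshot manifest. A generic way to confirm the snapshot is visible once the procedure finishes is Admin.listSnapshots(); the sketch below is an illustration using the public client API, not code from the test itself (getTableNameAsString is assumed to be available in this client version).

import java.io.IOException;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public class ListSnapshotsSketch {
  // Prints every snapshot the master currently knows about.
  static void printSnapshots(Admin admin) throws IOException {
    for (SnapshotDescription sd : admin.listSnapshots()) {
      System.out.println(sd.getName() + " table=" + sd.getTableNameAsString());
    }
  }
}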
2024-12-15T01:35:50,564 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=146 2024-12-15T01:35:50,564 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=145 2024-12-15T01:35:50,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.HMaster(4106): Remote procedure done, pid=145 2024-12-15T01:35:50,564 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportExpiredSnapshot on region 670f0adb452d72ff28029f7d197d3878 2024-12-15T01:35:50,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.HMaster(4106): Remote procedure done, pid=146 2024-12-15T01:35:50,564 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=145, ppid=144, state=RUNNABLE; SnapshotRegionProcedure 670f0adb452d72ff28029f7d197d3878 2024-12-15T01:35:50,564 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportExpiredSnapshot on region be1c2fa34021a22845f906c49f0630b3 2024-12-15T01:35:50,564 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=146, ppid=144, state=RUNNABLE; SnapshotRegionProcedure be1c2fa34021a22845f906c49f0630b3 2024-12-15T01:35:50,565 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=145, ppid=144, state=SUCCESS; SnapshotRegionProcedure 670f0adb452d72ff28029f7d197d3878 in 196 msec 2024-12-15T01:35:50,566 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=146, resume processing ppid=144 2024-12-15T01:35:50,566 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=146, ppid=144, state=SUCCESS; SnapshotRegionProcedure be1c2fa34021a22845f906c49f0630b3 in 196 msec 2024-12-15T01:35:50,566 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-15T01:35:50,566 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-15T01:35:50,566 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-15T01:35:50,566 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportExpiredSnapshot 2024-12-15T01:35:50,567 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under 
directory:hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/.tmp/snaptb0-testExportExpiredSnapshot 2024-12-15T01:35:50,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742176_1352 (size=630) 2024-12-15T01:35:50,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742176_1352 (size=630) 2024-12-15T01:35:50,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742176_1352 (size=630) 2024-12-15T01:35:50,576 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-15T01:35:50,580 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-15T01:35:50,580 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/.tmp/snaptb0-testExportExpiredSnapshot to hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/snaptb0-testExportExpiredSnapshot 2024-12-15T01:35:50,581 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-15T01:35:50,581 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 144 2024-12-15T01:35:50,582 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=144, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } in 221 msec 2024-12-15T01:35:50,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-15T01:35:50,666 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportExpiredSnapshot, procId: 144 completed 2024-12-15T01:35:50,667 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => 
'65536 B (64KB)'} 2024-12-15T01:35:50,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] procedure2.ProcedureExecutor(1098): Stored pid=147, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testExportExpiredSnapshot 2024-12-15T01:35:50,669 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=147, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_PRE_OPERATION 2024-12-15T01:35:50,669 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T01:35:50,669 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testExportExpiredSnapshot" procId is: 147 2024-12-15T01:35:50,670 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=147, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-15T01:35:50,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=147 2024-12-15T01:35:50,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742177_1353 (size=400) 2024-12-15T01:35:50,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742177_1353 (size=400) 2024-12-15T01:35:50,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742177_1353 (size=400) 2024-12-15T01:35:50,677 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => eded217274ea249d8fe0b60f2e3037a1, NAME => 'testExportExpiredSnapshot,,1734226550667.eded217274ea249d8fe0b60f2e3037a1.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b 2024-12-15T01:35:50,678 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => 6e4491bba5f2e14d8ad41ac86f60d6f5, NAME => 'testExportExpiredSnapshot,1,1734226550667.6e4491bba5f2e14d8ad41ac86f60d6f5.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, 
regionDir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b 2024-12-15T01:35:50,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742179_1355 (size=61) 2024-12-15T01:35:50,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742179_1355 (size=61) 2024-12-15T01:35:50,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742179_1355 (size=61) 2024-12-15T01:35:50,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742178_1354 (size=61) 2024-12-15T01:35:50,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742178_1354 (size=61) 2024-12-15T01:35:50,688 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(894): Instantiated testExportExpiredSnapshot,,1734226550667.eded217274ea249d8fe0b60f2e3037a1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T01:35:50,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742178_1354 (size=61) 2024-12-15T01:35:50,688 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1681): Closing eded217274ea249d8fe0b60f2e3037a1, disabling compactions & flushes 2024-12-15T01:35:50,688 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1703): Closing region testExportExpiredSnapshot,,1734226550667.eded217274ea249d8fe0b60f2e3037a1. 2024-12-15T01:35:50,688 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportExpiredSnapshot,,1734226550667.eded217274ea249d8fe0b60f2e3037a1. 2024-12-15T01:35:50,688 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testExportExpiredSnapshot,,1734226550667.eded217274ea249d8fe0b60f2e3037a1. after waiting 0 ms 2024-12-15T01:35:50,688 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testExportExpiredSnapshot,,1734226550667.eded217274ea249d8fe0b60f2e3037a1. 2024-12-15T01:35:50,688 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1922): Closed testExportExpiredSnapshot,,1734226550667.eded217274ea249d8fe0b60f2e3037a1. 
2024-12-15T01:35:50,688 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1635): Region close journal for eded217274ea249d8fe0b60f2e3037a1: 2024-12-15T01:35:50,695 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(894): Instantiated testExportExpiredSnapshot,1,1734226550667.6e4491bba5f2e14d8ad41ac86f60d6f5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T01:35:50,695 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1681): Closing 6e4491bba5f2e14d8ad41ac86f60d6f5, disabling compactions & flushes 2024-12-15T01:35:50,695 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1703): Closing region testExportExpiredSnapshot,1,1734226550667.6e4491bba5f2e14d8ad41ac86f60d6f5. 2024-12-15T01:35:50,695 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportExpiredSnapshot,1,1734226550667.6e4491bba5f2e14d8ad41ac86f60d6f5. 2024-12-15T01:35:50,695 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testExportExpiredSnapshot,1,1734226550667.6e4491bba5f2e14d8ad41ac86f60d6f5. after waiting 0 ms 2024-12-15T01:35:50,695 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testExportExpiredSnapshot,1,1734226550667.6e4491bba5f2e14d8ad41ac86f60d6f5. 2024-12-15T01:35:50,695 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1922): Closed testExportExpiredSnapshot,1,1734226550667.6e4491bba5f2e14d8ad41ac86f60d6f5. 2024-12-15T01:35:50,695 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1635): Region close journal for 6e4491bba5f2e14d8ad41ac86f60d6f5: 2024-12-15T01:35:50,696 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=147, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_ADD_TO_META 2024-12-15T01:35:50,697 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testExportExpiredSnapshot,,1734226550667.eded217274ea249d8fe0b60f2e3037a1.","families":{"info":[{"qualifier":"regioninfo","vlen":60,"tag":[],"timestamp":"1734226550696"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734226550696"}]},"ts":"1734226550696"} 2024-12-15T01:35:50,697 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testExportExpiredSnapshot,1,1734226550667.6e4491bba5f2e14d8ad41ac86f60d6f5.","families":{"info":[{"qualifier":"regioninfo","vlen":60,"tag":[],"timestamp":"1734226550696"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734226550696"}]},"ts":"1734226550696"} 2024-12-15T01:35:50,698 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 
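The create request above describes a table with a single 'cf' family (VERSIONS => '1'), and the RegionOpenAndInit entries show it is laid out as two regions split at row key '1'. A client-side create matching that descriptor could look roughly like the following sketch; table and family names come from the log, but this is not the test's own helper code.

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
  static void createTestTable(Admin admin) throws IOException {
    TableDescriptorBuilder td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("testExportExpiredSnapshot"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
            .setMaxVersions(1)   // VERSIONS => '1' in the logged descriptor
            .build());
    // One split key ("1") yields the two regions seen in the log:
    // [,1) -> eded217274ea249d8fe0b60f2e3037a1 and [1,) -> 6e4491bba5f2e14d8ad41ac86f60d6f5
    admin.createTable(td.build(), new byte[][] { Bytes.toBytes("1") });
  }
}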
2024-12-15T01:35:50,699 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=147, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-15T01:35:50,699 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734226550699"}]},"ts":"1734226550699"} 2024-12-15T01:35:50,701 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testExportExpiredSnapshot, state=ENABLING in hbase:meta 2024-12-15T01:35:50,762 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(202): Hosts are {66d575e28c61=0} racks are {/default-rack=0} 2024-12-15T01:35:50,763 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-15T01:35:50,763 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-15T01:35:50,763 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-15T01:35:50,763 INFO [PEWorker-5 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-15T01:35:50,763 INFO [PEWorker-5 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-15T01:35:50,763 INFO [PEWorker-5 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-15T01:35:50,763 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-15T01:35:50,764 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=148, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=eded217274ea249d8fe0b60f2e3037a1, ASSIGN}, {pid=149, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=6e4491bba5f2e14d8ad41ac86f60d6f5, ASSIGN}] 2024-12-15T01:35:50,765 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=149, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=6e4491bba5f2e14d8ad41ac86f60d6f5, ASSIGN 2024-12-15T01:35:50,765 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=148, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=eded217274ea249d8fe0b60f2e3037a1, ASSIGN 2024-12-15T01:35:50,765 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=149, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=6e4491bba5f2e14d8ad41ac86f60d6f5, ASSIGN; state=OFFLINE, location=66d575e28c61,39381,1734226422872; forceNewPlan=false, retain=false 2024-12-15T01:35:50,765 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=148, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=eded217274ea249d8fe0b60f2e3037a1, ASSIGN; state=OFFLINE, location=66d575e28c61,42663,1734226422715; forceNewPlan=false, retain=false 2024-12-15T01:35:50,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done 
pid=147 2024-12-15T01:35:50,916 INFO [66d575e28c61:40789 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-15T01:35:50,916 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=148 updating hbase:meta row=eded217274ea249d8fe0b60f2e3037a1, regionState=OPENING, regionLocation=66d575e28c61,42663,1734226422715 2024-12-15T01:35:50,916 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=149 updating hbase:meta row=6e4491bba5f2e14d8ad41ac86f60d6f5, regionState=OPENING, regionLocation=66d575e28c61,39381,1734226422872 2024-12-15T01:35:50,919 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=150, ppid=149, state=RUNNABLE; OpenRegionProcedure 6e4491bba5f2e14d8ad41ac86f60d6f5, server=66d575e28c61,39381,1734226422872}] 2024-12-15T01:35:50,921 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=151, ppid=148, state=RUNNABLE; OpenRegionProcedure eded217274ea249d8fe0b60f2e3037a1, server=66d575e28c61,42663,1734226422715}] 2024-12-15T01:35:50,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=147 2024-12-15T01:35:51,072 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 66d575e28c61,39381,1734226422872 2024-12-15T01:35:51,074 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 66d575e28c61,42663,1734226422715 2024-12-15T01:35:51,076 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] handler.AssignRegionHandler(135): Open testExportExpiredSnapshot,1,1734226550667.6e4491bba5f2e14d8ad41ac86f60d6f5. 2024-12-15T01:35:51,076 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(7285): Opening region: {ENCODED => 6e4491bba5f2e14d8ad41ac86f60d6f5, NAME => 'testExportExpiredSnapshot,1,1734226550667.6e4491bba5f2e14d8ad41ac86f60d6f5.', STARTKEY => '1', ENDKEY => ''} 2024-12-15T01:35:51,076 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(7999): Registered coprocessor service: region=testExportExpiredSnapshot,1,1734226550667.6e4491bba5f2e14d8ad41ac86f60d6f5. service=AccessControlService 2024-12-15T01:35:51,076 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-15T01:35:51,077 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportExpiredSnapshot 6e4491bba5f2e14d8ad41ac86f60d6f5 2024-12-15T01:35:51,077 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(894): Instantiated testExportExpiredSnapshot,1,1734226550667.6e4491bba5f2e14d8ad41ac86f60d6f5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T01:35:51,077 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(7327): checking encryption for 6e4491bba5f2e14d8ad41ac86f60d6f5 2024-12-15T01:35:51,077 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(7330): checking classloading for 6e4491bba5f2e14d8ad41ac86f60d6f5 2024-12-15T01:35:51,077 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] handler.AssignRegionHandler(135): Open testExportExpiredSnapshot,,1734226550667.eded217274ea249d8fe0b60f2e3037a1. 2024-12-15T01:35:51,078 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(7285): Opening region: {ENCODED => eded217274ea249d8fe0b60f2e3037a1, NAME => 'testExportExpiredSnapshot,,1734226550667.eded217274ea249d8fe0b60f2e3037a1.', STARTKEY => '', ENDKEY => '1'} 2024-12-15T01:35:51,078 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(7999): Registered coprocessor service: region=testExportExpiredSnapshot,,1734226550667.eded217274ea249d8fe0b60f2e3037a1. service=AccessControlService 2024-12-15T01:35:51,078 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
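The "System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded" lines indicate the cluster runs with the security coprocessors installed on each region, which is also why the acl table entries ([jenkins: RWXCA]) appear elsewhere in this log. A minimal configuration that produces that setup is sketched below; the property keys are standard HBase settings, but the snippet is an assumption-level illustration rather than the configuration actually used by this test cluster.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class AccessControllerConfSketch {
  // Enables authorization and installs the AccessController on master,
  // region, and region-server coprocessor hosts.
  static Configuration secureCoprocessorConf() {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.security.authorization", "true");
    conf.set("hbase.coprocessor.master.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");
    conf.set("hbase.coprocessor.region.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");
    conf.set("hbase.coprocessor.regionserver.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");
    return conf;
  }
}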
2024-12-15T01:35:51,078 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportExpiredSnapshot eded217274ea249d8fe0b60f2e3037a1 2024-12-15T01:35:51,079 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(894): Instantiated testExportExpiredSnapshot,,1734226550667.eded217274ea249d8fe0b60f2e3037a1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T01:35:51,079 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(7327): checking encryption for eded217274ea249d8fe0b60f2e3037a1 2024-12-15T01:35:51,079 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(7330): checking classloading for eded217274ea249d8fe0b60f2e3037a1 2024-12-15T01:35:51,079 INFO [StoreOpener-6e4491bba5f2e14d8ad41ac86f60d6f5-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 6e4491bba5f2e14d8ad41ac86f60d6f5 2024-12-15T01:35:51,080 INFO [StoreOpener-eded217274ea249d8fe0b60f2e3037a1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region eded217274ea249d8fe0b60f2e3037a1 2024-12-15T01:35:51,081 INFO [StoreOpener-6e4491bba5f2e14d8ad41ac86f60d6f5-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6e4491bba5f2e14d8ad41ac86f60d6f5 columnFamilyName cf 2024-12-15T01:35:51,081 DEBUG [StoreOpener-6e4491bba5f2e14d8ad41ac86f60d6f5-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T01:35:51,082 INFO [StoreOpener-6e4491bba5f2e14d8ad41ac86f60d6f5-1 {}] regionserver.HStore(327): Store=6e4491bba5f2e14d8ad41ac86f60d6f5/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-15T01:35:51,082 INFO [StoreOpener-eded217274ea249d8fe0b60f2e3037a1-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window 
min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region eded217274ea249d8fe0b60f2e3037a1 columnFamilyName cf 2024-12-15T01:35:51,082 DEBUG [StoreOpener-eded217274ea249d8fe0b60f2e3037a1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T01:35:51,083 INFO [StoreOpener-eded217274ea249d8fe0b60f2e3037a1-1 {}] regionserver.HStore(327): Store=eded217274ea249d8fe0b60f2e3037a1/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-15T01:35:51,083 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testExportExpiredSnapshot/6e4491bba5f2e14d8ad41ac86f60d6f5 2024-12-15T01:35:51,083 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testExportExpiredSnapshot/6e4491bba5f2e14d8ad41ac86f60d6f5 2024-12-15T01:35:51,083 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testExportExpiredSnapshot/eded217274ea249d8fe0b60f2e3037a1 2024-12-15T01:35:51,084 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testExportExpiredSnapshot/eded217274ea249d8fe0b60f2e3037a1 2024-12-15T01:35:51,085 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(1085): writing seq id for 6e4491bba5f2e14d8ad41ac86f60d6f5 2024-12-15T01:35:51,085 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(1085): writing seq id for eded217274ea249d8fe0b60f2e3037a1 2024-12-15T01:35:51,087 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testExportExpiredSnapshot/eded217274ea249d8fe0b60f2e3037a1/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-15T01:35:51,087 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testExportExpiredSnapshot/6e4491bba5f2e14d8ad41ac86f60d6f5/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-15T01:35:51,087 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(1102): Opened eded217274ea249d8fe0b60f2e3037a1; next sequenceid=2; 
SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59593604, jitterRate=-0.11198610067367554}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-15T01:35:51,087 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(1102): Opened 6e4491bba5f2e14d8ad41ac86f60d6f5; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62869480, jitterRate=-0.06317174434661865}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-15T01:35:51,088 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(1001): Region open journal for eded217274ea249d8fe0b60f2e3037a1: 2024-12-15T01:35:51,088 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(1001): Region open journal for 6e4491bba5f2e14d8ad41ac86f60d6f5: 2024-12-15T01:35:51,088 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegionServer(2601): Post open deploy tasks for testExportExpiredSnapshot,,1734226550667.eded217274ea249d8fe0b60f2e3037a1., pid=151, masterSystemTime=1734226551074 2024-12-15T01:35:51,088 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegionServer(2601): Post open deploy tasks for testExportExpiredSnapshot,1,1734226550667.6e4491bba5f2e14d8ad41ac86f60d6f5., pid=150, masterSystemTime=1734226551072 2024-12-15T01:35:51,089 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegionServer(2628): Finished post open deploy task for testExportExpiredSnapshot,,1734226550667.eded217274ea249d8fe0b60f2e3037a1. 2024-12-15T01:35:51,090 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] handler.AssignRegionHandler(164): Opened testExportExpiredSnapshot,,1734226550667.eded217274ea249d8fe0b60f2e3037a1. 2024-12-15T01:35:51,090 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=148 updating hbase:meta row=eded217274ea249d8fe0b60f2e3037a1, regionState=OPEN, openSeqNum=2, regionLocation=66d575e28c61,42663,1734226422715 2024-12-15T01:35:51,090 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegionServer(2628): Finished post open deploy task for testExportExpiredSnapshot,1,1734226550667.6e4491bba5f2e14d8ad41ac86f60d6f5. 2024-12-15T01:35:51,090 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] handler.AssignRegionHandler(164): Opened testExportExpiredSnapshot,1,1734226550667.6e4491bba5f2e14d8ad41ac86f60d6f5. 
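With both regions of testExportExpiredSnapshot open, the test client loads rows; the HRegion(8254) warnings a few entries further on note that these writes go in with the WAL disabled. A client-side Put with that durability setting looks roughly like the sketch below. The row key and value are hypothetical placeholders; the cf:q column matches the qualifier seen in the earlier flush entries. This is generic API usage, not the test's own load helper.

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalPutSketch {
  static void writeRow(Connection conn) throws IOException {
    try (Table table = conn.getTable(TableName.valueOf("testExportExpiredSnapshot"))) {
      Put put = new Put(Bytes.toBytes("row-0"))   // hypothetical row key
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      // Skipping the WAL is what triggers the "Data may be lost in the event of a crash"
      // warning logged by HRegion(8254).
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);
    }
  }
}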
2024-12-15T01:35:51,090 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=149 updating hbase:meta row=6e4491bba5f2e14d8ad41ac86f60d6f5, regionState=OPEN, openSeqNum=2, regionLocation=66d575e28c61,39381,1734226422872 2024-12-15T01:35:51,092 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=151, resume processing ppid=148 2024-12-15T01:35:51,093 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=151, ppid=148, state=SUCCESS; OpenRegionProcedure eded217274ea249d8fe0b60f2e3037a1, server=66d575e28c61,42663,1734226422715 in 170 msec 2024-12-15T01:35:51,093 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=150, resume processing ppid=149 2024-12-15T01:35:51,093 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=150, ppid=149, state=SUCCESS; OpenRegionProcedure 6e4491bba5f2e14d8ad41ac86f60d6f5, server=66d575e28c61,39381,1734226422872 in 173 msec 2024-12-15T01:35:51,093 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=148, ppid=147, state=SUCCESS; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=eded217274ea249d8fe0b60f2e3037a1, ASSIGN in 329 msec 2024-12-15T01:35:51,094 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=149, resume processing ppid=147 2024-12-15T01:35:51,094 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=149, ppid=147, state=SUCCESS; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=6e4491bba5f2e14d8ad41ac86f60d6f5, ASSIGN in 330 msec 2024-12-15T01:35:51,094 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=147, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-15T01:35:51,095 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734226551094"}]},"ts":"1734226551094"} 2024-12-15T01:35:51,096 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testExportExpiredSnapshot, state=ENABLED in hbase:meta 2024-12-15T01:35:51,104 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=147, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_POST_OPERATION 2024-12-15T01:35:51,105 DEBUG [PEWorker-3 {}] access.PermissionStorage(175): Writing permission with rowKey testExportExpiredSnapshot jenkins: RWXCA 2024-12-15T01:35:51,106 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35281 {}] access.PermissionStorage(611): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-15T01:35:51,112 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39381-0x1002752410c0003, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:35:51,112 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35281-0x1002752410c0002, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:35:51,112 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase/acl 2024-12-15T01:35:51,112 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42663-0x1002752410c0001, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:35:51,121 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-15T01:35:51,121 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-15T01:35:51,121 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-15T01:35:51,121 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-15T01:35:51,121 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-15T01:35:51,121 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-15T01:35:51,121 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-15T01:35:51,121 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-15T01:35:51,122 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=147, state=SUCCESS; CreateTableProcedure table=testExportExpiredSnapshot in 453 msec 2024-12-15T01:35:51,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=147 2024-12-15T01:35:51,274 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testExportExpiredSnapshot, procId: 147 completed 2024-12-15T01:35:51,274 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testExportExpiredSnapshot get assigned. 
Timeout = 60000ms 2024-12-15T01:35:51,274 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-15T01:35:51,277 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testExportExpiredSnapshot assigned to meta. Checking AM states. 2024-12-15T01:35:51,277 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-15T01:35:51,277 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testExportExpiredSnapshot assigned. 2024-12-15T01:35:51,284 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42663 {}] regionserver.HRegion(8254): writing data to region testExportExpiredSnapshot,,1734226550667.eded217274ea249d8fe0b60f2e3037a1. with WAL disabled. Data may be lost in the event of a crash. 2024-12-15T01:35:51,284 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] regionserver.HRegion(8254): writing data to region testExportExpiredSnapshot,1,1734226550667.6e4491bba5f2e14d8ad41ac86f60d6f5. with WAL disabled. Data may be lost in the event of a crash. 2024-12-15T01:35:51,287 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testExportExpiredSnapshot 2024-12-15T01:35:51,287 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testExportExpiredSnapshot,,1734226550667.eded217274ea249d8fe0b60f2e3037a1. 2024-12-15T01:35:51,287 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-15T01:35:51,296 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } 2024-12-15T01:35:51,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snapshot-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-12-15T01:35:51,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-15T01:35:51,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x52771570 to 127.0.0.1:64248 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@38566328 2024-12-15T01:35:51,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@671ed733, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T01:35:51,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T01:35:51,315 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54176, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T01:35:51,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x52771570 to 127.0.0.1:64248 2024-12-15T01:35:51,315 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T01:35:51,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2aa33420 to 127.0.0.1:64248 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@21674d7f 2024-12-15T01:35:51,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3051a12f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T01:35:51,331 DEBUG [hconnection-0x5b01d1bf-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T01:35:51,332 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54192, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T01:35:51,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T01:35:51,334 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46738, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T01:35:51,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2aa33420 to 127.0.0.1:64248 2024-12-15T01:35:51,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T01:35:51,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] access.PermissionStorage(611): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-15T01:35:51,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 
2024-12-15T01:35:51,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] procedure2.ProcedureExecutor(1098): Stored pid=152, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } 2024-12-15T01:35:51,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }, snapshot procedure id = 152 2024-12-15T01:35:51,337 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_PREPARE 2024-12-15T01:35:51,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-12-15T01:35:51,337 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-15T01:35:51,340 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-15T01:35:51,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742180_1356 (size=152) 2024-12-15T01:35:51,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742180_1356 (size=152) 2024-12-15T01:35:51,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742180_1356 (size=152) 2024-12-15T01:35:51,364 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-15T01:35:51,364 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=153, ppid=152, state=RUNNABLE; SnapshotRegionProcedure eded217274ea249d8fe0b60f2e3037a1}, {pid=154, ppid=152, state=RUNNABLE; SnapshotRegionProcedure 6e4491bba5f2e14d8ad41ac86f60d6f5}] 2024-12-15T01:35:51,365 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=153, ppid=152, state=RUNNABLE; SnapshotRegionProcedure eded217274ea249d8fe0b60f2e3037a1 2024-12-15T01:35:51,365 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=154, ppid=152, state=RUNNABLE; SnapshotRegionProcedure 6e4491bba5f2e14d8ad41ac86f60d6f5 2024-12-15T01:35:51,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done 
pid=152 2024-12-15T01:35:51,516 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 66d575e28c61,39381,1734226422872 2024-12-15T01:35:51,516 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 66d575e28c61,42663,1734226422715 2024-12-15T01:35:51,517 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39381 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=154 2024-12-15T01:35:51,517 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42663 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=153 2024-12-15T01:35:51,517 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportExpiredSnapshot,1,1734226550667.6e4491bba5f2e14d8ad41ac86f60d6f5. 2024-12-15T01:35:51,517 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportExpiredSnapshot,,1734226550667.eded217274ea249d8fe0b60f2e3037a1. 2024-12-15T01:35:51,517 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.HRegion(2837): Flushing eded217274ea249d8fe0b60f2e3037a1 1/1 column families, dataSize=400 B heapSize=1.09 KB 2024-12-15T01:35:51,517 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.HRegion(2837): Flushing 6e4491bba5f2e14d8ad41ac86f60d6f5 1/1 column families, dataSize=2.87 KB heapSize=6.44 KB 2024-12-15T01:35:51,537 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testExportExpiredSnapshot/eded217274ea249d8fe0b60f2e3037a1/.tmp/cf/b68d4ed0636e41678f738480c5bad294 is 71, key is 014b3d25e9e16a50fcb0222ba42efcea/cf:q/1734226551284/Put/seqid=0 2024-12-15T01:35:51,538 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testExportExpiredSnapshot/6e4491bba5f2e14d8ad41ac86f60d6f5/.tmp/cf/25e1759b56bc49619546d974c9f24a7c is 71, key is 10067060bbcca51df11600476a2bf23b/cf:q/1734226551284/Put/seqid=0 2024-12-15T01:35:51,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742181_1357 (size=5490) 2024-12-15T01:35:51,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742182_1358 (size=8120) 2024-12-15T01:35:51,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742181_1357 (size=5490) 2024-12-15T01:35:51,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742182_1358 (size=8120) 2024-12-15T01:35:51,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:46345 is added to blk_1073742181_1357 (size=5490) 2024-12-15T01:35:51,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742182_1358 (size=8120) 2024-12-15T01:35:51,543 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=400 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testExportExpiredSnapshot/eded217274ea249d8fe0b60f2e3037a1/.tmp/cf/b68d4ed0636e41678f738480c5bad294 2024-12-15T01:35:51,543 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.87 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testExportExpiredSnapshot/6e4491bba5f2e14d8ad41ac86f60d6f5/.tmp/cf/25e1759b56bc49619546d974c9f24a7c 2024-12-15T01:35:51,547 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testExportExpiredSnapshot/eded217274ea249d8fe0b60f2e3037a1/.tmp/cf/b68d4ed0636e41678f738480c5bad294 as hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testExportExpiredSnapshot/eded217274ea249d8fe0b60f2e3037a1/cf/b68d4ed0636e41678f738480c5bad294 2024-12-15T01:35:51,547 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testExportExpiredSnapshot/6e4491bba5f2e14d8ad41ac86f60d6f5/.tmp/cf/25e1759b56bc49619546d974c9f24a7c as hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testExportExpiredSnapshot/6e4491bba5f2e14d8ad41ac86f60d6f5/cf/25e1759b56bc49619546d974c9f24a7c 2024-12-15T01:35:51,550 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testExportExpiredSnapshot/eded217274ea249d8fe0b60f2e3037a1/cf/b68d4ed0636e41678f738480c5bad294, entries=6, sequenceid=5, filesize=5.4 K 2024-12-15T01:35:51,550 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testExportExpiredSnapshot/6e4491bba5f2e14d8ad41ac86f60d6f5/cf/25e1759b56bc49619546d974c9f24a7c, entries=44, sequenceid=5, filesize=7.9 K 2024-12-15T01:35:51,551 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.HRegion(3040): Finished flush of dataSize ~400 B/400, heapSize ~1.08 KB/1104, currentSize=0 B/0 for eded217274ea249d8fe0b60f2e3037a1 in 34ms, sequenceid=5, compaction requested=false 2024-12-15T01:35:51,551 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.HRegion(3040): Finished flush of dataSize 
~2.87 KB/2936, heapSize ~6.42 KB/6576, currentSize=0 B/0 for 6e4491bba5f2e14d8ad41ac86f60d6f5 in 34ms, sequenceid=5, compaction requested=false 2024-12-15T01:35:51,551 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportExpiredSnapshot' 2024-12-15T01:35:51,551 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportExpiredSnapshot' 2024-12-15T01:35:51,552 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.HRegion(2538): Flush status journal for 6e4491bba5f2e14d8ad41ac86f60d6f5: 2024-12-15T01:35:51,552 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.HRegion(2538): Flush status journal for eded217274ea249d8fe0b60f2e3037a1: 2024-12-15T01:35:51,552 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportExpiredSnapshot,1,1734226550667.6e4491bba5f2e14d8ad41ac86f60d6f5. for snapshot-testExportExpiredSnapshot completed. 2024-12-15T01:35:51,552 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportExpiredSnapshot,,1734226550667.eded217274ea249d8fe0b60f2e3037a1. for snapshot-testExportExpiredSnapshot completed. 2024-12-15T01:35:51,552 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] snapshot.SnapshotManifest(241): Storing 'testExportExpiredSnapshot,1,1734226550667.6e4491bba5f2e14d8ad41ac86f60d6f5.' region-info for snapshot=snapshot-testExportExpiredSnapshot 2024-12-15T01:35:51,552 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-15T01:35:51,552 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] snapshot.SnapshotManifest(241): Storing 'testExportExpiredSnapshot,,1734226550667.eded217274ea249d8fe0b60f2e3037a1.' 
region-info for snapshot=snapshot-testExportExpiredSnapshot 2024-12-15T01:35:51,552 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testExportExpiredSnapshot/6e4491bba5f2e14d8ad41ac86f60d6f5/cf/25e1759b56bc49619546d974c9f24a7c] hfiles 2024-12-15T01:35:51,552 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-15T01:35:51,552 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testExportExpiredSnapshot/6e4491bba5f2e14d8ad41ac86f60d6f5/cf/25e1759b56bc49619546d974c9f24a7c for snapshot=snapshot-testExportExpiredSnapshot 2024-12-15T01:35:51,552 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testExportExpiredSnapshot/eded217274ea249d8fe0b60f2e3037a1/cf/b68d4ed0636e41678f738480c5bad294] hfiles 2024-12-15T01:35:51,552 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testExportExpiredSnapshot/eded217274ea249d8fe0b60f2e3037a1/cf/b68d4ed0636e41678f738480c5bad294 for snapshot=snapshot-testExportExpiredSnapshot 2024-12-15T01:35:51,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742183_1359 (size=103) 2024-12-15T01:35:51,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742183_1359 (size=103) 2024-12-15T01:35:51,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742184_1360 (size=103) 2024-12-15T01:35:51,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742184_1360 (size=103) 2024-12-15T01:35:51,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742183_1359 (size=103) 2024-12-15T01:35:51,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742184_1360 (size=103) 2024-12-15T01:35:51,562 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportExpiredSnapshot,1,1734226550667.6e4491bba5f2e14d8ad41ac86f60d6f5. 
2024-12-15T01:35:51,562 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=154 2024-12-15T01:35:51,562 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportExpiredSnapshot,,1734226550667.eded217274ea249d8fe0b60f2e3037a1. 2024-12-15T01:35:51,562 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=153 2024-12-15T01:35:51,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.HMaster(4106): Remote procedure done, pid=154 2024-12-15T01:35:51,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.HMaster(4106): Remote procedure done, pid=153 2024-12-15T01:35:51,563 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportExpiredSnapshot on region eded217274ea249d8fe0b60f2e3037a1 2024-12-15T01:35:51,563 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportExpiredSnapshot on region 6e4491bba5f2e14d8ad41ac86f60d6f5 2024-12-15T01:35:51,563 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=154, ppid=152, state=RUNNABLE; SnapshotRegionProcedure 6e4491bba5f2e14d8ad41ac86f60d6f5 2024-12-15T01:35:51,563 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=153, ppid=152, state=RUNNABLE; SnapshotRegionProcedure eded217274ea249d8fe0b60f2e3037a1 2024-12-15T01:35:51,564 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=154, ppid=152, state=SUCCESS; SnapshotRegionProcedure 6e4491bba5f2e14d8ad41ac86f60d6f5 in 199 msec 2024-12-15T01:35:51,565 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=153, resume processing ppid=152 2024-12-15T01:35:51,565 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=153, ppid=152, state=SUCCESS; SnapshotRegionProcedure eded217274ea249d8fe0b60f2e3037a1 in 200 msec 2024-12-15T01:35:51,565 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-15T01:35:51,565 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-15T01:35:51,566 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-15T01:35:51,566 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot 
Manifest for snapshot-testExportExpiredSnapshot 2024-12-15T01:35:51,566 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot 2024-12-15T01:35:51,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742185_1361 (size=609) 2024-12-15T01:35:51,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742185_1361 (size=609) 2024-12-15T01:35:51,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742185_1361 (size=609) 2024-12-15T01:35:51,580 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-15T01:35:51,585 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-15T01:35:51,586 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot to hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/snapshot-testExportExpiredSnapshot 2024-12-15T01:35:51,587 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_POST_OPERATION 2024-12-15T01:35:51,587 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }, snapshot procedure id = 152 2024-12-15T01:35:51,588 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=152, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } in 252 msec 2024-12-15T01:35:51,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-12-15T01:35:51,639 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testExportExpiredSnapshot, procId: 152 completed 2024-12-15T01:35:52,293 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot 2024-12-15T01:35:52,293 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot 
Metrics about Tables on a single HBase RegionServer 2024-12-15T01:35:52,294 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot 2024-12-15T01:35:52,294 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot Metrics about Tables on a single HBase RegionServer 2024-12-15T01:35:52,295 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-15T01:35:52,296 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion 2024-12-15T01:35:52,564 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734226430009_0006_000001 (auth:SIMPLE) from 127.0.0.1:48062 2024-12-15T01:35:52,578 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/MiniMRCluster_1118476571/yarn-659708074/MiniMRCluster_1118476571-localDir-nm-0_2/usercache/jenkins/appcache/application_1734226430009_0006/container_1734226430009_0006_01_000001/launch_container.sh] 2024-12-15T01:35:52,578 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/MiniMRCluster_1118476571/yarn-659708074/MiniMRCluster_1118476571-localDir-nm-0_2/usercache/jenkins/appcache/application_1734226430009_0006/container_1734226430009_0006_01_000001/container_tokens] 2024-12-15T01:35:52,579 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/MiniMRCluster_1118476571/yarn-659708074/MiniMRCluster_1118476571-localDir-nm-0_2/usercache/jenkins/appcache/application_1734226430009_0006/container_1734226430009_0006_01_000001/sysfs] 2024-12-15T01:35:54,120 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-15T01:36:01,654 INFO [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226561654 2024-12-15T01:36:01,654 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:33329, tgtDir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226561654, rawTgtDir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226561654, srcFsUri=hdfs://localhost:33329, 
srcDir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b 2024-12-15T01:36:01,685 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:33329, inputRoot=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b 2024-12-15T01:36:01,685 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-778768832_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226561654, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226561654/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot 2024-12-15T01:36:01,688 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 2024-12-15T01:36:01,689 ERROR [Time-limited test {}] util.AbstractHBaseTool(153): Error running command-line tool org.apache.hadoop.hbase.snapshot.SnapshotTTLExpiredException: TTL for snapshot 'snapshot-testExportExpiredSnapshot' has already expired. at org.apache.hadoop.hbase.snapshot.ExportSnapshot.verifySnapshot(ExportSnapshot.java:948) ~[classes/:?] at org.apache.hadoop.hbase.snapshot.ExportSnapshot.doWork(ExportSnapshot.java:1093) ~[classes/:?] at org.apache.hadoop.hbase.util.AbstractHBaseTool.run(AbstractHBaseTool.java:151) ~[classes/:?] at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:82) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.runExportSnapshot(TestExportSnapshot.java:523) ~[test-classes/:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportExpiredSnapshot(TestExportSnapshot.java:315) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T01:36:01,691 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportExpiredSnapshot 2024-12-15T01:36:01,691 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportExpiredSnapshot 2024-12-15T01:36:01,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] procedure2.ProcedureExecutor(1098): Stored pid=155, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-15T01:36:01,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-15T01:36:01,693 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734226561693"}]},"ts":"1734226561693"} 2024-12-15T01:36:01,695 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportExpiredSnapshot, state=DISABLING in hbase:meta 2024-12-15T01:36:01,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-15T01:36:01,795 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testtb-testExportExpiredSnapshot to state=DISABLING 2024-12-15T01:36:01,796 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=156, ppid=155, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportExpiredSnapshot}] 2024-12-15T01:36:01,797 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=157, ppid=156, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=670f0adb452d72ff28029f7d197d3878, UNASSIGN}, {pid=158, ppid=156, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=be1c2fa34021a22845f906c49f0630b3, UNASSIGN}] 2024-12-15T01:36:01,798 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=158, ppid=156, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=be1c2fa34021a22845f906c49f0630b3, UNASSIGN 2024-12-15T01:36:01,798 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=157, ppid=156, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=670f0adb452d72ff28029f7d197d3878, UNASSIGN 2024-12-15T01:36:01,799 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=158 updating hbase:meta row=be1c2fa34021a22845f906c49f0630b3, regionState=CLOSING, regionLocation=66d575e28c61,39381,1734226422872 2024-12-15T01:36:01,799 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=157 updating hbase:meta row=670f0adb452d72ff28029f7d197d3878, regionState=CLOSING, regionLocation=66d575e28c61,35281,1734226422799 2024-12-15T01:36:01,800 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-15T01:36:01,800 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=159, ppid=158, state=RUNNABLE; CloseRegionProcedure be1c2fa34021a22845f906c49f0630b3, 
server=66d575e28c61,39381,1734226422872}] 2024-12-15T01:36:01,801 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-15T01:36:01,801 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=160, ppid=157, state=RUNNABLE; CloseRegionProcedure 670f0adb452d72ff28029f7d197d3878, server=66d575e28c61,35281,1734226422799}] 2024-12-15T01:36:01,953 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 66d575e28c61,39381,1734226422872 2024-12-15T01:36:01,953 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] handler.UnassignRegionHandler(124): Close be1c2fa34021a22845f906c49f0630b3 2024-12-15T01:36:01,954 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 66d575e28c61,35281,1734226422799 2024-12-15T01:36:01,954 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-15T01:36:01,954 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1681): Closing be1c2fa34021a22845f906c49f0630b3, disabling compactions & flushes 2024-12-15T01:36:01,954 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1703): Closing region testtb-testExportExpiredSnapshot,1,1734226549295.be1c2fa34021a22845f906c49f0630b3. 2024-12-15T01:36:01,954 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportExpiredSnapshot,1,1734226549295.be1c2fa34021a22845f906c49f0630b3. 2024-12-15T01:36:01,954 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportExpiredSnapshot,1,1734226549295.be1c2fa34021a22845f906c49f0630b3. after waiting 0 ms 2024-12-15T01:36:01,954 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportExpiredSnapshot,1,1734226549295.be1c2fa34021a22845f906c49f0630b3. 2024-12-15T01:36:01,954 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(124): Close 670f0adb452d72ff28029f7d197d3878 2024-12-15T01:36:01,954 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-15T01:36:01,954 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1681): Closing 670f0adb452d72ff28029f7d197d3878, disabling compactions & flushes 2024-12-15T01:36:01,954 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1703): Closing region testtb-testExportExpiredSnapshot,,1734226549295.670f0adb452d72ff28029f7d197d3878. 2024-12-15T01:36:01,954 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportExpiredSnapshot,,1734226549295.670f0adb452d72ff28029f7d197d3878. 
2024-12-15T01:36:01,954 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportExpiredSnapshot,,1734226549295.670f0adb452d72ff28029f7d197d3878. after waiting 0 ms 2024-12-15T01:36:01,955 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportExpiredSnapshot,,1734226549295.670f0adb452d72ff28029f7d197d3878. 2024-12-15T01:36:01,960 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportExpiredSnapshot/be1c2fa34021a22845f906c49f0630b3/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-15T01:36:01,961 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-15T01:36:01,961 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1922): Closed testtb-testExportExpiredSnapshot,1,1734226549295.be1c2fa34021a22845f906c49f0630b3. 2024-12-15T01:36:01,961 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1635): Region close journal for be1c2fa34021a22845f906c49f0630b3: 2024-12-15T01:36:01,962 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportExpiredSnapshot/670f0adb452d72ff28029f7d197d3878/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-15T01:36:01,962 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] handler.UnassignRegionHandler(170): Closed be1c2fa34021a22845f906c49f0630b3 2024-12-15T01:36:01,963 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-15T01:36:01,963 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1922): Closed testtb-testExportExpiredSnapshot,,1734226549295.670f0adb452d72ff28029f7d197d3878. 
2024-12-15T01:36:01,963 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1635): Region close journal for 670f0adb452d72ff28029f7d197d3878: 2024-12-15T01:36:01,963 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=158 updating hbase:meta row=be1c2fa34021a22845f906c49f0630b3, regionState=CLOSED 2024-12-15T01:36:01,964 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(170): Closed 670f0adb452d72ff28029f7d197d3878 2024-12-15T01:36:01,965 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=157 updating hbase:meta row=670f0adb452d72ff28029f7d197d3878, regionState=CLOSED 2024-12-15T01:36:01,966 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=159, resume processing ppid=158 2024-12-15T01:36:01,966 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=159, ppid=158, state=SUCCESS; CloseRegionProcedure be1c2fa34021a22845f906c49f0630b3, server=66d575e28c61,39381,1734226422872 in 164 msec 2024-12-15T01:36:01,967 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=158, ppid=156, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=be1c2fa34021a22845f906c49f0630b3, UNASSIGN in 169 msec 2024-12-15T01:36:01,968 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=160, resume processing ppid=157 2024-12-15T01:36:01,969 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=160, ppid=157, state=SUCCESS; CloseRegionProcedure 670f0adb452d72ff28029f7d197d3878, server=66d575e28c61,35281,1734226422799 in 165 msec 2024-12-15T01:36:01,969 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=157, resume processing ppid=156 2024-12-15T01:36:01,970 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=157, ppid=156, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=670f0adb452d72ff28029f7d197d3878, UNASSIGN in 171 msec 2024-12-15T01:36:01,971 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=156, resume processing ppid=155 2024-12-15T01:36:01,971 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=156, ppid=155, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportExpiredSnapshot in 174 msec 2024-12-15T01:36:01,972 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734226561972"}]},"ts":"1734226561972"} 2024-12-15T01:36:01,973 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportExpiredSnapshot, state=DISABLED in hbase:meta 2024-12-15T01:36:01,994 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testtb-testExportExpiredSnapshot to state=DISABLED 2024-12-15T01:36:01,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-15T01:36:01,995 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=155, state=SUCCESS; DisableTableProcedure table=testtb-testExportExpiredSnapshot in 303 msec 2024-12-15T01:36:02,293 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: 
RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot 2024-12-15T01:36:02,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-15T01:36:02,298 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportExpiredSnapshot, procId: 155 completed 2024-12-15T01:36:02,299 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportExpiredSnapshot 2024-12-15T01:36:02,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] procedure2.ProcedureExecutor(1098): Stored pid=161, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-15T01:36:02,303 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=161, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-15T01:36:02,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportExpiredSnapshot 2024-12-15T01:36:02,304 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=161, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-15T01:36:02,306 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35281 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportExpiredSnapshot 2024-12-15T01:36:02,308 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportExpiredSnapshot/be1c2fa34021a22845f906c49f0630b3 2024-12-15T01:36:02,308 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportExpiredSnapshot/670f0adb452d72ff28029f7d197d3878 2024-12-15T01:36:02,311 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportExpiredSnapshot/be1c2fa34021a22845f906c49f0630b3/cf, FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportExpiredSnapshot/be1c2fa34021a22845f906c49f0630b3/recovered.edits] 2024-12-15T01:36:02,311 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportExpiredSnapshot/670f0adb452d72ff28029f7d197d3878/cf, FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportExpiredSnapshot/670f0adb452d72ff28029f7d197d3878/recovered.edits] 2024-12-15T01:36:02,315 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportExpiredSnapshot/670f0adb452d72ff28029f7d197d3878/cf/70347d0959434dce86c4ac7d910edb77 to 
hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/archive/data/default/testtb-testExportExpiredSnapshot/670f0adb452d72ff28029f7d197d3878/cf/70347d0959434dce86c4ac7d910edb77 2024-12-15T01:36:02,315 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportExpiredSnapshot/be1c2fa34021a22845f906c49f0630b3/cf/16ff105b593242caa76d039ceb8b04a3 to hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/archive/data/default/testtb-testExportExpiredSnapshot/be1c2fa34021a22845f906c49f0630b3/cf/16ff105b593242caa76d039ceb8b04a3 2024-12-15T01:36:02,317 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportExpiredSnapshot/670f0adb452d72ff28029f7d197d3878/recovered.edits/9.seqid to hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/archive/data/default/testtb-testExportExpiredSnapshot/670f0adb452d72ff28029f7d197d3878/recovered.edits/9.seqid 2024-12-15T01:36:02,317 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportExpiredSnapshot/be1c2fa34021a22845f906c49f0630b3/recovered.edits/9.seqid to hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/archive/data/default/testtb-testExportExpiredSnapshot/be1c2fa34021a22845f906c49f0630b3/recovered.edits/9.seqid 2024-12-15T01:36:02,317 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportExpiredSnapshot/670f0adb452d72ff28029f7d197d3878 2024-12-15T01:36:02,317 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportExpiredSnapshot/be1c2fa34021a22845f906c49f0630b3 2024-12-15T01:36:02,317 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportExpiredSnapshot regions 2024-12-15T01:36:02,318 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=161, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-15T01:36:02,320 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportExpiredSnapshot from hbase:meta 2024-12-15T01:36:02,322 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportExpiredSnapshot' descriptor. 2024-12-15T01:36:02,323 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=161, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-15T01:36:02,323 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportExpiredSnapshot' from region states. 
2024-12-15T01:36:02,323 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot,,1734226549295.670f0adb452d72ff28029f7d197d3878.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734226562323"}]},"ts":"9223372036854775807"} 2024-12-15T01:36:02,323 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot,1,1734226549295.be1c2fa34021a22845f906c49f0630b3.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734226562323"}]},"ts":"9223372036854775807"} 2024-12-15T01:36:02,325 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-15T01:36:02,325 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 670f0adb452d72ff28029f7d197d3878, NAME => 'testtb-testExportExpiredSnapshot,,1734226549295.670f0adb452d72ff28029f7d197d3878.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => be1c2fa34021a22845f906c49f0630b3, NAME => 'testtb-testExportExpiredSnapshot,1,1734226549295.be1c2fa34021a22845f906c49f0630b3.', STARTKEY => '1', ENDKEY => ''}] 2024-12-15T01:36:02,325 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportExpiredSnapshot' as deleted. 2024-12-15T01:36:02,325 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734226562325"}]},"ts":"9223372036854775807"} 2024-12-15T01:36:02,327 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportExpiredSnapshot state from META 2024-12-15T01:36:02,354 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=161, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-15T01:36:02,356 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=161, state=SUCCESS; DeleteTableProcedure table=testtb-testExportExpiredSnapshot in 55 msec 2024-12-15T01:36:02,362 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35281-0x1002752410c0002, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-15T01:36:02,362 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-15T01:36:02,362 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39381-0x1002752410c0003, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-15T01:36:02,362 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42663-0x1002752410c0001, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-15T01:36:02,363 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-12-15T01:36:02,363 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from 
testtb-testExportExpiredSnapshot with data PBUF 2024-12-15T01:36:02,363 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-12-15T01:36:02,363 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-12-15T01:36:02,370 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39381-0x1002752410c0003, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-15T01:36:02,370 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42663-0x1002752410c0001, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-15T01:36:02,370 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35281-0x1002752410c0002, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-15T01:36:02,370 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-15T01:36:02,370 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39381-0x1002752410c0003, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:36:02,370 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42663-0x1002752410c0001, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:36:02,370 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35281-0x1002752410c0002, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:36:02,370 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:36:02,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-15T01:36:02,372 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-15T01:36:02,372 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-15T01:36:02,372 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot 
\x00 \x01 \x02 \x03 \x04 2024-12-15T01:36:02,372 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-15T01:36:02,372 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportExpiredSnapshot, procId: 161 completed 2024-12-15T01:36:02,384 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportExpiredSnapshot" 2024-12-15T01:36:02,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testExportExpiredSnapshot 2024-12-15T01:36:02,387 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snapshot-testExportExpiredSnapshot" 2024-12-15T01:36:02,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] snapshot.SnapshotManager(380): Deleting snapshot: snapshot-testExportExpiredSnapshot 2024-12-15T01:36:02,390 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportExpiredSnapshot" 2024-12-15T01:36:02,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportExpiredSnapshot 2024-12-15T01:36:02,412 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportExpiredSnapshot Thread=798 (was 806), OpenFileDescriptor=795 (was 810), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=458 (was 570), ProcessCount=11 (was 17), AvailableMemoryMB=4169 (was 3409) - AvailableMemoryMB LEAK? 
- 2024-12-15T01:36:02,412 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=798 is superior to 500 2024-12-15T01:36:02,429 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testEmptyExportFileSystemState Thread=798, OpenFileDescriptor=795, MaxFileDescriptor=1048576, SystemLoadAverage=458, ProcessCount=11, AvailableMemoryMB=4168 2024-12-15T01:36:02,429 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=798 is superior to 500 2024-12-15T01:36:02,431 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-15T01:36:02,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] procedure2.ProcedureExecutor(1098): Stored pid=162, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-15T01:36:02,432 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=162, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_PRE_OPERATION 2024-12-15T01:36:02,432 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T01:36:02,432 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testEmptyExportFileSystemState" procId is: 162 2024-12-15T01:36:02,433 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=162, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-15T01:36:02,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=162 2024-12-15T01:36:02,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742186_1362 (size=412) 2024-12-15T01:36:02,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742186_1362 (size=412) 2024-12-15T01:36:02,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742186_1362 (size=412) 2024-12-15T01:36:02,443 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => 5399b49682ac5742acedeb1201f18c71, NAME => 'testtb-testEmptyExportFileSystemState,1,1734226562431.5399b49682ac5742acedeb1201f18c71.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 
'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b 2024-12-15T01:36:02,443 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 8019e6b666090168a1635a3c2749d379, NAME => 'testtb-testEmptyExportFileSystemState,,1734226562431.8019e6b666090168a1635a3c2749d379.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b 2024-12-15T01:36:02,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742188_1364 (size=73) 2024-12-15T01:36:02,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742187_1363 (size=73) 2024-12-15T01:36:02,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742188_1364 (size=73) 2024-12-15T01:36:02,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742187_1363 (size=73) 2024-12-15T01:36:02,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742188_1364 (size=73) 2024-12-15T01:36:02,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742187_1363 (size=73) 2024-12-15T01:36:02,452 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testEmptyExportFileSystemState,,1734226562431.8019e6b666090168a1635a3c2749d379.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T01:36:02,452 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testEmptyExportFileSystemState,1,1734226562431.5399b49682ac5742acedeb1201f18c71.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T01:36:02,452 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1681): Closing 8019e6b666090168a1635a3c2749d379, disabling compactions & flushes 2024-12-15T01:36:02,452 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1681): Closing 5399b49682ac5742acedeb1201f18c71, disabling compactions & flushes 2024-12-15T01:36:02,452 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1703): Closing region 
testtb-testEmptyExportFileSystemState,1,1734226562431.5399b49682ac5742acedeb1201f18c71. 2024-12-15T01:36:02,452 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testEmptyExportFileSystemState,,1734226562431.8019e6b666090168a1635a3c2749d379. 2024-12-15T01:36:02,452 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testEmptyExportFileSystemState,1,1734226562431.5399b49682ac5742acedeb1201f18c71. 2024-12-15T01:36:02,452 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testEmptyExportFileSystemState,,1734226562431.8019e6b666090168a1635a3c2749d379. 2024-12-15T01:36:02,452 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testEmptyExportFileSystemState,1,1734226562431.5399b49682ac5742acedeb1201f18c71. after waiting 0 ms 2024-12-15T01:36:02,452 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testEmptyExportFileSystemState,,1734226562431.8019e6b666090168a1635a3c2749d379. after waiting 0 ms 2024-12-15T01:36:02,452 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testEmptyExportFileSystemState,1,1734226562431.5399b49682ac5742acedeb1201f18c71. 2024-12-15T01:36:02,452 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testEmptyExportFileSystemState,,1734226562431.8019e6b666090168a1635a3c2749d379. 2024-12-15T01:36:02,452 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testEmptyExportFileSystemState,1,1734226562431.5399b49682ac5742acedeb1201f18c71. 2024-12-15T01:36:02,452 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testEmptyExportFileSystemState,,1734226562431.8019e6b666090168a1635a3c2749d379. 
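The CreateTableProcedure entries above write the filesystem layout for two regions split at row key '1' and immediately close the freshly initialized HRegion instances; the descriptor matches what the master logged (a single family 'cf', one version, no compression, REGION_REPLICATION '1'). A sketch of the equivalent client call, assuming an open Admin handle (class and method names here are illustrative, not from the test code):

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateEmptyExportFileSystemStateTable {
  // Assumes the caller supplies an already-open Admin handle.
  static void create(Admin admin) throws IOException {
    TableName table = TableName.valueOf("default", "testtb-testEmptyExportFileSystemState");
    TableDescriptor desc = TableDescriptorBuilder.newBuilder(table)
        .setRegionReplication(1)  // TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'} in the log
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
            .setMaxVersions(1)    // VERSIONS => '1'; the remaining family attributes are defaults
            .build())
        .build();
    byte[][] splitKeys = { Bytes.toBytes("1") };  // yields the two regions ['', '1') and ['1', '')
    admin.createTable(desc, splitKeys);           // master stores a CreateTableProcedure and assigns the regions
  }
}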
2024-12-15T01:36:02,452 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1635): Region close journal for 5399b49682ac5742acedeb1201f18c71: 2024-12-15T01:36:02,452 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1635): Region close journal for 8019e6b666090168a1635a3c2749d379: 2024-12-15T01:36:02,453 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=162, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_ADD_TO_META 2024-12-15T01:36:02,453 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testEmptyExportFileSystemState,,1734226562431.8019e6b666090168a1635a3c2749d379.","families":{"info":[{"qualifier":"regioninfo","vlen":72,"tag":[],"timestamp":"1734226562453"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734226562453"}]},"ts":"1734226562453"} 2024-12-15T01:36:02,453 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testEmptyExportFileSystemState,1,1734226562431.5399b49682ac5742acedeb1201f18c71.","families":{"info":[{"qualifier":"regioninfo","vlen":72,"tag":[],"timestamp":"1734226562453"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734226562453"}]},"ts":"1734226562453"} 2024-12-15T01:36:02,455 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-15T01:36:02,456 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=162, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-15T01:36:02,456 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734226562456"}]},"ts":"1734226562456"} 2024-12-15T01:36:02,457 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testEmptyExportFileSystemState, state=ENABLING in hbase:meta 2024-12-15T01:36:02,470 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(202): Hosts are {66d575e28c61=0} racks are {/default-rack=0} 2024-12-15T01:36:02,471 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-15T01:36:02,471 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-15T01:36:02,471 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-15T01:36:02,471 INFO [PEWorker-5 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-15T01:36:02,471 INFO [PEWorker-5 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-15T01:36:02,471 INFO [PEWorker-5 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-15T01:36:02,471 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-15T01:36:02,471 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=163, ppid=162, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=8019e6b666090168a1635a3c2749d379, ASSIGN}, {pid=164, ppid=162, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, 
region=5399b49682ac5742acedeb1201f18c71, ASSIGN}] 2024-12-15T01:36:02,472 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=164, ppid=162, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=5399b49682ac5742acedeb1201f18c71, ASSIGN 2024-12-15T01:36:02,472 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=163, ppid=162, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=8019e6b666090168a1635a3c2749d379, ASSIGN 2024-12-15T01:36:02,473 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=164, ppid=162, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=5399b49682ac5742acedeb1201f18c71, ASSIGN; state=OFFLINE, location=66d575e28c61,39381,1734226422872; forceNewPlan=false, retain=false 2024-12-15T01:36:02,473 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=163, ppid=162, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=8019e6b666090168a1635a3c2749d379, ASSIGN; state=OFFLINE, location=66d575e28c61,42663,1734226422715; forceNewPlan=false, retain=false 2024-12-15T01:36:02,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=162 2024-12-15T01:36:02,623 INFO [66d575e28c61:40789 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-15T01:36:02,623 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=163 updating hbase:meta row=8019e6b666090168a1635a3c2749d379, regionState=OPENING, regionLocation=66d575e28c61,42663,1734226422715 2024-12-15T01:36:02,623 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=164 updating hbase:meta row=5399b49682ac5742acedeb1201f18c71, regionState=OPENING, regionLocation=66d575e28c61,39381,1734226422872 2024-12-15T01:36:02,625 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=165, ppid=163, state=RUNNABLE; OpenRegionProcedure 8019e6b666090168a1635a3c2749d379, server=66d575e28c61,42663,1734226422715}] 2024-12-15T01:36:02,625 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=166, ppid=164, state=RUNNABLE; OpenRegionProcedure 5399b49682ac5742acedeb1201f18c71, server=66d575e28c61,39381,1734226422872}] 2024-12-15T01:36:02,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=162 2024-12-15T01:36:02,776 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 66d575e28c61,42663,1734226422715 2024-12-15T01:36:02,778 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 66d575e28c61,39381,1734226422872 2024-12-15T01:36:02,785 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] handler.AssignRegionHandler(135): Open testtb-testEmptyExportFileSystemState,1,1734226562431.5399b49682ac5742acedeb1201f18c71. 
2024-12-15T01:36:02,785 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(7285): Opening region: {ENCODED => 5399b49682ac5742acedeb1201f18c71, NAME => 'testtb-testEmptyExportFileSystemState,1,1734226562431.5399b49682ac5742acedeb1201f18c71.', STARTKEY => '1', ENDKEY => ''} 2024-12-15T01:36:02,785 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testEmptyExportFileSystemState,1,1734226562431.5399b49682ac5742acedeb1201f18c71. service=AccessControlService 2024-12-15T01:36:02,785 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] handler.AssignRegionHandler(135): Open testtb-testEmptyExportFileSystemState,,1734226562431.8019e6b666090168a1635a3c2749d379. 2024-12-15T01:36:02,786 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(7285): Opening region: {ENCODED => 8019e6b666090168a1635a3c2749d379, NAME => 'testtb-testEmptyExportFileSystemState,,1734226562431.8019e6b666090168a1635a3c2749d379.', STARTKEY => '', ENDKEY => '1'} 2024-12-15T01:36:02,786 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-15T01:36:02,786 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testEmptyExportFileSystemState 5399b49682ac5742acedeb1201f18c71 2024-12-15T01:36:02,786 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(894): Instantiated testtb-testEmptyExportFileSystemState,1,1734226562431.5399b49682ac5742acedeb1201f18c71.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T01:36:02,786 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testEmptyExportFileSystemState,,1734226562431.8019e6b666090168a1635a3c2749d379. service=AccessControlService 2024-12-15T01:36:02,786 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(7327): checking encryption for 5399b49682ac5742acedeb1201f18c71 2024-12-15T01:36:02,786 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(7330): checking classloading for 5399b49682ac5742acedeb1201f18c71 2024-12-15T01:36:02,787 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-15T01:36:02,787 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testEmptyExportFileSystemState 8019e6b666090168a1635a3c2749d379 2024-12-15T01:36:02,787 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(894): Instantiated testtb-testEmptyExportFileSystemState,,1734226562431.8019e6b666090168a1635a3c2749d379.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T01:36:02,787 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(7327): checking encryption for 8019e6b666090168a1635a3c2749d379 2024-12-15T01:36:02,787 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(7330): checking classloading for 8019e6b666090168a1635a3c2749d379 2024-12-15T01:36:02,788 INFO [StoreOpener-5399b49682ac5742acedeb1201f18c71-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 5399b49682ac5742acedeb1201f18c71 2024-12-15T01:36:02,789 INFO [StoreOpener-8019e6b666090168a1635a3c2749d379-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 8019e6b666090168a1635a3c2749d379 2024-12-15T01:36:02,790 INFO [StoreOpener-5399b49682ac5742acedeb1201f18c71-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5399b49682ac5742acedeb1201f18c71 columnFamilyName cf 2024-12-15T01:36:02,790 DEBUG [StoreOpener-5399b49682ac5742acedeb1201f18c71-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T01:36:02,790 INFO [StoreOpener-8019e6b666090168a1635a3c2749d379-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 
8019e6b666090168a1635a3c2749d379 columnFamilyName cf 2024-12-15T01:36:02,790 DEBUG [StoreOpener-8019e6b666090168a1635a3c2749d379-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T01:36:02,790 INFO [StoreOpener-5399b49682ac5742acedeb1201f18c71-1 {}] regionserver.HStore(327): Store=5399b49682ac5742acedeb1201f18c71/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-15T01:36:02,790 INFO [StoreOpener-8019e6b666090168a1635a3c2749d379-1 {}] regionserver.HStore(327): Store=8019e6b666090168a1635a3c2749d379/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-15T01:36:02,791 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testEmptyExportFileSystemState/5399b49682ac5742acedeb1201f18c71 2024-12-15T01:36:02,791 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testEmptyExportFileSystemState/8019e6b666090168a1635a3c2749d379 2024-12-15T01:36:02,791 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testEmptyExportFileSystemState/5399b49682ac5742acedeb1201f18c71 2024-12-15T01:36:02,791 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testEmptyExportFileSystemState/8019e6b666090168a1635a3c2749d379 2024-12-15T01:36:02,793 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(1085): writing seq id for 5399b49682ac5742acedeb1201f18c71 2024-12-15T01:36:02,793 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(1085): writing seq id for 8019e6b666090168a1635a3c2749d379 2024-12-15T01:36:02,795 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testEmptyExportFileSystemState/8019e6b666090168a1635a3c2749d379/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-15T01:36:02,795 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testEmptyExportFileSystemState/5399b49682ac5742acedeb1201f18c71/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-15T01:36:02,795 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(1102): Opened 
8019e6b666090168a1635a3c2749d379; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70158089, jitterRate=0.04543699324131012}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-15T01:36:02,796 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(1102): Opened 5399b49682ac5742acedeb1201f18c71; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60309541, jitterRate=-0.10131780803203583}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-15T01:36:02,796 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(1001): Region open journal for 5399b49682ac5742acedeb1201f18c71: 2024-12-15T01:36:02,796 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(1001): Region open journal for 8019e6b666090168a1635a3c2749d379: 2024-12-15T01:36:02,796 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testEmptyExportFileSystemState,,1734226562431.8019e6b666090168a1635a3c2749d379., pid=165, masterSystemTime=1734226562776 2024-12-15T01:36:02,796 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testEmptyExportFileSystemState,1,1734226562431.5399b49682ac5742acedeb1201f18c71., pid=166, masterSystemTime=1734226562777 2024-12-15T01:36:02,798 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testEmptyExportFileSystemState,1,1734226562431.5399b49682ac5742acedeb1201f18c71. 2024-12-15T01:36:02,798 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] handler.AssignRegionHandler(164): Opened testtb-testEmptyExportFileSystemState,1,1734226562431.5399b49682ac5742acedeb1201f18c71. 2024-12-15T01:36:02,798 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=164 updating hbase:meta row=5399b49682ac5742acedeb1201f18c71, regionState=OPEN, openSeqNum=2, regionLocation=66d575e28c61,39381,1734226422872 2024-12-15T01:36:02,798 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testEmptyExportFileSystemState,,1734226562431.8019e6b666090168a1635a3c2749d379. 2024-12-15T01:36:02,798 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] handler.AssignRegionHandler(164): Opened testtb-testEmptyExportFileSystemState,,1734226562431.8019e6b666090168a1635a3c2749d379. 
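The two desiredMaxFileSize values logged at region open are consistent with a single 67108864-byte (64 MiB) base split size perturbed by each region's logged jitterRate, which is how ConstantSizeRegionSplitPolicy applies its random jitter: 67108864 * (1 + 0.04543699) ≈ 70158089 for 8019e6b666090168a1635a3c2749d379, and 67108864 * (1 - 0.10131781) ≈ 60309541 for 5399b49682ac5742acedeb1201f18c71.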
2024-12-15T01:36:02,799 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=163 updating hbase:meta row=8019e6b666090168a1635a3c2749d379, regionState=OPEN, openSeqNum=2, regionLocation=66d575e28c61,42663,1734226422715 2024-12-15T01:36:02,801 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=166, resume processing ppid=164 2024-12-15T01:36:02,802 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=165, resume processing ppid=163 2024-12-15T01:36:02,802 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=166, ppid=164, state=SUCCESS; OpenRegionProcedure 5399b49682ac5742acedeb1201f18c71, server=66d575e28c61,39381,1734226422872 in 174 msec 2024-12-15T01:36:02,802 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=165, ppid=163, state=SUCCESS; OpenRegionProcedure 8019e6b666090168a1635a3c2749d379, server=66d575e28c61,42663,1734226422715 in 175 msec 2024-12-15T01:36:02,802 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=164, ppid=162, state=SUCCESS; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=5399b49682ac5742acedeb1201f18c71, ASSIGN in 330 msec 2024-12-15T01:36:02,803 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=163, resume processing ppid=162 2024-12-15T01:36:02,803 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=163, ppid=162, state=SUCCESS; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=8019e6b666090168a1635a3c2749d379, ASSIGN in 331 msec 2024-12-15T01:36:02,803 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=162, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-15T01:36:02,804 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734226562803"}]},"ts":"1734226562803"} 2024-12-15T01:36:02,805 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testEmptyExportFileSystemState, state=ENABLED in hbase:meta 2024-12-15T01:36:02,812 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=162, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_POST_OPERATION 2024-12-15T01:36:02,812 DEBUG [PEWorker-2 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testEmptyExportFileSystemState jenkins: RWXCA 2024-12-15T01:36:02,814 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35281 {}] access.PermissionStorage(611): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-12-15T01:36:02,851 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39381-0x1002752410c0003, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:36:02,851 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35281-0x1002752410c0002, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:36:02,851 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:42663-0x1002752410c0001, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:36:02,851 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:36:02,871 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-15T01:36:02,871 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-15T01:36:02,872 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-15T01:36:02,872 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-15T01:36:02,872 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-15T01:36:02,872 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-15T01:36:02,872 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-15T01:36:02,872 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-15T01:36:02,874 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=162, state=SUCCESS; CreateTableProcedure table=testtb-testEmptyExportFileSystemState in 439 msec 2024-12-15T01:36:02,885 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testEmptyExportFileSystemState' 2024-12-15T01:36:03,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=162 2024-12-15T01:36:03,040 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testEmptyExportFileSystemState, procId: 162 completed 2024-12-15T01:36:03,040 DEBUG [Time-limited test {}] 
hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testEmptyExportFileSystemState get assigned. Timeout = 60000ms 2024-12-15T01:36:03,040 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-15T01:36:03,049 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testEmptyExportFileSystemState assigned to meta. Checking AM states. 2024-12-15T01:36:03,049 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-15T01:36:03,049 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testEmptyExportFileSystemState assigned. 2024-12-15T01:36:03,053 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-15T01:36:03,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734226563053 (current time:1734226563053). 2024-12-15T01:36:03,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-15T01:36:03,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testEmptyExportFileSystemState VERSION not specified, setting to 2 2024-12-15T01:36:03,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-15T01:36:03,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x096a8a17 to 127.0.0.1:64248 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@9b88cf4 2024-12-15T01:36:03,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@14c8771a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T01:36:03,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T01:36:03,065 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45234, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T01:36:03,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x096a8a17 to 127.0.0.1:64248 2024-12-15T01:36:03,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T01:36:03,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x565070a4 to 127.0.0.1:64248 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1bc289b6 2024-12-15T01:36:03,079 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4c655d73, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T01:36:03,081 DEBUG [hconnection-0x7c2dfd59-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T01:36:03,082 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45238, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T01:36:03,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T01:36:03,084 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52354, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T01:36:03,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x565070a4 to 127.0.0.1:64248 2024-12-15T01:36:03,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T01:36:03,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] access.PermissionStorage(611): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-12-15T01:36:03,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 
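At this point the master has validated the request { ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, checked the caller's ACL, and found no existing snapshot of that name, so it is about to store SnapshotProcedure pid=167. A hedged sketch of the client call that produces such a request, assuming an open Admin handle (the two-argument Admin.snapshot issues a FLUSH-type snapshot for an enabled table; the wrapper class is illustrative):

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public class TakeEmptySnapshot {
  // Assumes the caller supplies an open Admin; snapshot and table names match the log.
  static void snapshot(Admin admin) throws IOException {
    admin.snapshot("emptySnaptb0-testEmptyExportFileSystemState",
        TableName.valueOf("default", "testtb-testEmptyExportFileSystemState"));
  }
}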
2024-12-15T01:36:03,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] procedure2.ProcedureExecutor(1098): Stored pid=167, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-15T01:36:03,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 167 2024-12-15T01:36:03,087 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-15T01:36:03,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-15T01:36:03,087 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-15T01:36:03,089 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-15T01:36:03,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742189_1365 (size=185) 2024-12-15T01:36:03,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742189_1365 (size=185) 2024-12-15T01:36:03,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742189_1365 (size=185) 2024-12-15T01:36:03,095 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-15T01:36:03,095 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=168, ppid=167, state=RUNNABLE; SnapshotRegionProcedure 8019e6b666090168a1635a3c2749d379}, {pid=169, ppid=167, state=RUNNABLE; SnapshotRegionProcedure 5399b49682ac5742acedeb1201f18c71}] 2024-12-15T01:36:03,096 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=169, ppid=167, state=RUNNABLE; SnapshotRegionProcedure 5399b49682ac5742acedeb1201f18c71 2024-12-15T01:36:03,096 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=168, ppid=167, state=RUNNABLE; SnapshotRegionProcedure 8019e6b666090168a1635a3c2749d379 2024-12-15T01:36:03,188 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-15T01:36:03,247 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 66d575e28c61,42663,1734226422715 2024-12-15T01:36:03,247 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 66d575e28c61,39381,1734226422872 2024-12-15T01:36:03,248 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39381 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=169 2024-12-15T01:36:03,248 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42663 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=168 2024-12-15T01:36:03,248 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,,1734226562431.8019e6b666090168a1635a3c2749d379. 2024-12-15T01:36:03,248 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,1,1734226562431.5399b49682ac5742acedeb1201f18c71. 2024-12-15T01:36:03,249 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] regionserver.HRegion(2538): Flush status journal for 5399b49682ac5742acedeb1201f18c71: 2024-12-15T01:36:03,249 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] regionserver.HRegion(2538): Flush status journal for 8019e6b666090168a1635a3c2749d379: 2024-12-15T01:36:03,249 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,,1734226562431.8019e6b666090168a1635a3c2749d379. for emptySnaptb0-testEmptyExportFileSystemState completed. 2024-12-15T01:36:03,249 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,1,1734226562431.5399b49682ac5742acedeb1201f18c71. for emptySnaptb0-testEmptyExportFileSystemState completed. 2024-12-15T01:36:03,250 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,1,1734226562431.5399b49682ac5742acedeb1201f18c71.' region-info for snapshot=emptySnaptb0-testEmptyExportFileSystemState 2024-12-15T01:36:03,250 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,,1734226562431.8019e6b666090168a1635a3c2749d379.' 
region-info for snapshot=emptySnaptb0-testEmptyExportFileSystemState 2024-12-15T01:36:03,250 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-15T01:36:03,250 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-15T01:36:03,250 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-15T01:36:03,250 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-15T01:36:03,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742190_1366 (size=76) 2024-12-15T01:36:03,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742190_1366 (size=76) 2024-12-15T01:36:03,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742191_1367 (size=76) 2024-12-15T01:36:03,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742191_1367 (size=76) 2024-12-15T01:36:03,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742191_1367 (size=76) 2024-12-15T01:36:03,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742190_1366 (size=76) 2024-12-15T01:36:03,263 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,1,1734226562431.5399b49682ac5742acedeb1201f18c71. 2024-12-15T01:36:03,263 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,,1734226562431.8019e6b666090168a1635a3c2749d379. 
2024-12-15T01:36:03,263 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=168 2024-12-15T01:36:03,263 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=169 2024-12-15T01:36:03,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.HMaster(4106): Remote procedure done, pid=169 2024-12-15T01:36:03,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.HMaster(4106): Remote procedure done, pid=168 2024-12-15T01:36:03,263 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testEmptyExportFileSystemState on region 5399b49682ac5742acedeb1201f18c71 2024-12-15T01:36:03,263 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testEmptyExportFileSystemState on region 8019e6b666090168a1635a3c2749d379 2024-12-15T01:36:03,263 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=169, ppid=167, state=RUNNABLE; SnapshotRegionProcedure 5399b49682ac5742acedeb1201f18c71 2024-12-15T01:36:03,264 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=168, ppid=167, state=RUNNABLE; SnapshotRegionProcedure 8019e6b666090168a1635a3c2749d379 2024-12-15T01:36:03,265 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=169, ppid=167, state=SUCCESS; SnapshotRegionProcedure 5399b49682ac5742acedeb1201f18c71 in 169 msec 2024-12-15T01:36:03,266 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=168, resume processing ppid=167 2024-12-15T01:36:03,266 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=168, ppid=167, state=SUCCESS; SnapshotRegionProcedure 8019e6b666090168a1635a3c2749d379 in 169 msec 2024-12-15T01:36:03,266 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-15T01:36:03,266 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-15T01:36:03,267 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-15T01:36:03,267 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testEmptyExportFileSystemState 2024-12-15T01:36:03,268 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under 
directory:hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-12-15T01:36:03,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742192_1368 (size=567) 2024-12-15T01:36:03,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742192_1368 (size=567) 2024-12-15T01:36:03,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742192_1368 (size=567) 2024-12-15T01:36:03,279 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-15T01:36:03,285 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-15T01:36:03,286 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState to hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-12-15T01:36:03,288 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-15T01:36:03,288 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 167 2024-12-15T01:36:03,289 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=167, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } in 203 msec 2024-12-15T01:36:03,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-15T01:36:03,390 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testEmptyExportFileSystemState, procId: 167 completed 2024-12-15T01:36:03,398 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42663 {}] regionserver.HRegion(8254): writing data to region testtb-testEmptyExportFileSystemState,,1734226562431.8019e6b666090168a1635a3c2749d379. with WAL disabled. Data may be lost in the event of a crash. 
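The HRegion(8254) warnings at the end of the block above come from the test loading rows with the write-ahead log skipped. Skipping the WAL is a per-mutation client choice; a minimal sketch, assuming an already-open Table handle and made-up row/value bytes, looks like this:

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SkipWalPut {
      // 'table' is assumed to be an open Table for the test table; row and value are illustrative.
      static void putWithoutWal(Table table) throws IOException {
        Put put = new Put(Bytes.toBytes("row-0001"));
        put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
        // SKIP_WAL is what triggers the "WAL disabled ... Data may be lost" warning above.
        put.setDurability(Durability.SKIP_WAL);
        table.put(put);
      }
    }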
2024-12-15T01:36:03,399 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] regionserver.HRegion(8254): writing data to region testtb-testEmptyExportFileSystemState,1,1734226562431.5399b49682ac5742acedeb1201f18c71. with WAL disabled. Data may be lost in the event of a crash. 2024-12-15T01:36:03,402 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testEmptyExportFileSystemState 2024-12-15T01:36:03,402 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testEmptyExportFileSystemState,,1734226562431.8019e6b666090168a1635a3c2749d379. 2024-12-15T01:36:03,402 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-15T01:36:03,413 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-15T01:36:03,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734226563413 (current time:1734226563413). 2024-12-15T01:36:03,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-15T01:36:03,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testEmptyExportFileSystemState VERSION not specified, setting to 2 2024-12-15T01:36:03,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-15T01:36:03,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3e5b89f4 to 127.0.0.1:64248 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@296ac2ad 2024-12-15T01:36:03,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4b489c57, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T01:36:03,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T01:36:03,423 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45254, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T01:36:03,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3e5b89f4 to 127.0.0.1:64248 2024-12-15T01:36:03,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T01:36:03,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1129f94d to 127.0.0.1:64248 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6e292e6f 2024-12-15T01:36:03,437 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@610cadd9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T01:36:03,439 DEBUG [hconnection-0x6c89ab11-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T01:36:03,440 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45258, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T01:36:03,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T01:36:03,442 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52364, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T01:36:03,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1129f94d to 127.0.0.1:64248 2024-12-15T01:36:03,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T01:36:03,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] access.PermissionStorage(611): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-12-15T01:36:03,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 
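Between the two snapshots, the client repeatedly asks the master "Checking to see if procedure is done" until the SnapshotProcedure finishes. A comparable client-side wait can also be written against the Admin API directly; the sketch below is an assumption-laden illustration (snapshotAsync's exact signature has shifted across 2.x releases, and the fixed sleep is only for brevity).

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.SnapshotDescription;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class WaitForSnapshot {
      // 'admin' is assumed to be an open Admin handle; names are placeholders, not from the test.
      static void snapshotAndWait(Admin admin) throws Exception {
        SnapshotDescription desc = new SnapshotDescription(
            "snaptb0-testEmptyExportFileSystemState",
            TableName.valueOf("default", "testtb-testEmptyExportFileSystemState"),
            SnapshotType.FLUSH);
        admin.snapshotAsync(desc);                 // fire the request without blocking
        while (!admin.isSnapshotFinished(desc)) {  // poll, roughly as the test client does above
          Thread.sleep(200);                       // crude; real code should bound the wait
        }
      }
    }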
2024-12-15T01:36:03,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] procedure2.ProcedureExecutor(1098): Stored pid=170, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-15T01:36:03,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 170 2024-12-15T01:36:03,446 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-15T01:36:03,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=170 2024-12-15T01:36:03,448 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-15T01:36:03,450 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-15T01:36:03,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742193_1369 (size=180) 2024-12-15T01:36:03,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742193_1369 (size=180) 2024-12-15T01:36:03,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742193_1369 (size=180) 2024-12-15T01:36:03,459 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-15T01:36:03,460 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=171, ppid=170, state=RUNNABLE; SnapshotRegionProcedure 8019e6b666090168a1635a3c2749d379}, {pid=172, ppid=170, state=RUNNABLE; SnapshotRegionProcedure 5399b49682ac5742acedeb1201f18c71}] 2024-12-15T01:36:03,460 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=172, ppid=170, state=RUNNABLE; SnapshotRegionProcedure 5399b49682ac5742acedeb1201f18c71 2024-12-15T01:36:03,460 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=171, ppid=170, state=RUNNABLE; SnapshotRegionProcedure 8019e6b666090168a1635a3c2749d379 2024-12-15T01:36:03,548 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=170 2024-12-15T01:36:03,611 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 66d575e28c61,39381,1734226422872 2024-12-15T01:36:03,611 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 66d575e28c61,42663,1734226422715 2024-12-15T01:36:03,612 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42663 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=171 2024-12-15T01:36:03,612 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39381 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=172 2024-12-15T01:36:03,613 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,1,1734226562431.5399b49682ac5742acedeb1201f18c71. 2024-12-15T01:36:03,613 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,,1734226562431.8019e6b666090168a1635a3c2749d379. 2024-12-15T01:36:03,613 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.HRegion(2837): Flushing 8019e6b666090168a1635a3c2749d379 1/1 column families, dataSize=266 B heapSize=832 B 2024-12-15T01:36:03,613 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.HRegion(2837): Flushing 5399b49682ac5742acedeb1201f18c71 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-12-15T01:36:03,631 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testEmptyExportFileSystemState/8019e6b666090168a1635a3c2749d379/.tmp/cf/b4405f8543bf49cface0c0da8ce5c0a2 is 71, key is 01cf8de36af7ee22efaea8b70d232f72/cf:q/1734226563398/Put/seqid=0 2024-12-15T01:36:03,635 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testEmptyExportFileSystemState/5399b49682ac5742acedeb1201f18c71/.tmp/cf/f2e87c473e88477280e5964e5fc9c539 is 71, key is 101d893729d9e2591dea3d1c0c50bc87/cf:q/1734226563399/Put/seqid=0 2024-12-15T01:36:03,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742194_1370 (size=5356) 2024-12-15T01:36:03,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742194_1370 (size=5356) 2024-12-15T01:36:03,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742194_1370 (size=5356) 2024-12-15T01:36:03,636 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, 
pid=171}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=266 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testEmptyExportFileSystemState/8019e6b666090168a1635a3c2749d379/.tmp/cf/b4405f8543bf49cface0c0da8ce5c0a2 2024-12-15T01:36:03,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742195_1371 (size=8256) 2024-12-15T01:36:03,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742195_1371 (size=8256) 2024-12-15T01:36:03,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742195_1371 (size=8256) 2024-12-15T01:36:03,640 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.00 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testEmptyExportFileSystemState/5399b49682ac5742acedeb1201f18c71/.tmp/cf/f2e87c473e88477280e5964e5fc9c539 2024-12-15T01:36:03,641 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testEmptyExportFileSystemState/8019e6b666090168a1635a3c2749d379/.tmp/cf/b4405f8543bf49cface0c0da8ce5c0a2 as hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testEmptyExportFileSystemState/8019e6b666090168a1635a3c2749d379/cf/b4405f8543bf49cface0c0da8ce5c0a2 2024-12-15T01:36:03,644 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testEmptyExportFileSystemState/5399b49682ac5742acedeb1201f18c71/.tmp/cf/f2e87c473e88477280e5964e5fc9c539 as hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testEmptyExportFileSystemState/5399b49682ac5742acedeb1201f18c71/cf/f2e87c473e88477280e5964e5fc9c539 2024-12-15T01:36:03,645 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testEmptyExportFileSystemState/8019e6b666090168a1635a3c2749d379/cf/b4405f8543bf49cface0c0da8ce5c0a2, entries=4, sequenceid=6, filesize=5.2 K 2024-12-15T01:36:03,646 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.HRegion(3040): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for 8019e6b666090168a1635a3c2749d379 in 33ms, sequenceid=6, compaction requested=false 2024-12-15T01:36:03,646 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.HRegion(2538): Flush status journal for 8019e6b666090168a1635a3c2749d379: 2024-12-15T01:36:03,646 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, 
pid=171}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,,1734226562431.8019e6b666090168a1635a3c2749d379. for snaptb0-testEmptyExportFileSystemState completed. 2024-12-15T01:36:03,646 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,,1734226562431.8019e6b666090168a1635a3c2749d379.' region-info for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-15T01:36:03,646 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-15T01:36:03,646 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testEmptyExportFileSystemState/8019e6b666090168a1635a3c2749d379/cf/b4405f8543bf49cface0c0da8ce5c0a2] hfiles 2024-12-15T01:36:03,646 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testEmptyExportFileSystemState/8019e6b666090168a1635a3c2749d379/cf/b4405f8543bf49cface0c0da8ce5c0a2 for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-15T01:36:03,649 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testEmptyExportFileSystemState/5399b49682ac5742acedeb1201f18c71/cf/f2e87c473e88477280e5964e5fc9c539, entries=46, sequenceid=6, filesize=8.1 K 2024-12-15T01:36:03,650 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.HRegion(3040): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for 5399b49682ac5742acedeb1201f18c71 in 37ms, sequenceid=6, compaction requested=false 2024-12-15T01:36:03,650 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.HRegion(2538): Flush status journal for 5399b49682ac5742acedeb1201f18c71: 2024-12-15T01:36:03,650 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,1,1734226562431.5399b49682ac5742acedeb1201f18c71. for snaptb0-testEmptyExportFileSystemState completed. 2024-12-15T01:36:03,650 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,1,1734226562431.5399b49682ac5742acedeb1201f18c71.' 
region-info for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-15T01:36:03,650 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-15T01:36:03,650 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testEmptyExportFileSystemState/5399b49682ac5742acedeb1201f18c71/cf/f2e87c473e88477280e5964e5fc9c539] hfiles 2024-12-15T01:36:03,650 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testEmptyExportFileSystemState/5399b49682ac5742acedeb1201f18c71/cf/f2e87c473e88477280e5964e5fc9c539 for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-15T01:36:03,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742196_1372 (size=115) 2024-12-15T01:36:03,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742196_1372 (size=115) 2024-12-15T01:36:03,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742196_1372 (size=115) 2024-12-15T01:36:03,652 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,,1734226562431.8019e6b666090168a1635a3c2749d379. 
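Unlike the earlier empty snapshot, snaptb0-testEmptyExportFileSystemState captures data: the pid=171/172 entries show each region flushing its memstore (266 B and ~3.00 KB) to new hfiles before those files are referenced in the manifest. That flush step is what the FLUSH snapshot type buys; a SKIPFLUSH snapshot references only files already on disk. A hedged sketch of both options, with placeholder names and an assumed open Admin handle:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.SnapshotDescription;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class SnapshotFlushModes {
      static void demo(Admin admin) throws Exception {
        TableName table = TableName.valueOf("default", "testtb-testEmptyExportFileSystemState");
        // FLUSH: regions flush first, as seen in the DefaultStoreFlusher entries above.
        admin.snapshot(new SnapshotDescription("snap-flush", table, SnapshotType.FLUSH));
        // SKIPFLUSH: only existing store files are referenced; flush explicitly beforehand
        // if the most recent edits must be included in the snapshot.
        admin.flush(table);
        admin.snapshot(new SnapshotDescription("snap-skipflush", table, SnapshotType.SKIPFLUSH));
      }
    }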
2024-12-15T01:36:03,652 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=171 2024-12-15T01:36:03,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.HMaster(4106): Remote procedure done, pid=171 2024-12-15T01:36:03,652 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testEmptyExportFileSystemState on region 8019e6b666090168a1635a3c2749d379 2024-12-15T01:36:03,652 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=171, ppid=170, state=RUNNABLE; SnapshotRegionProcedure 8019e6b666090168a1635a3c2749d379 2024-12-15T01:36:03,654 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=171, ppid=170, state=SUCCESS; SnapshotRegionProcedure 8019e6b666090168a1635a3c2749d379 in 193 msec 2024-12-15T01:36:03,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742197_1373 (size=115) 2024-12-15T01:36:03,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742197_1373 (size=115) 2024-12-15T01:36:03,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742197_1373 (size=115) 2024-12-15T01:36:03,656 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,1,1734226562431.5399b49682ac5742acedeb1201f18c71. 
2024-12-15T01:36:03,656 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=172 2024-12-15T01:36:03,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.HMaster(4106): Remote procedure done, pid=172 2024-12-15T01:36:03,656 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testEmptyExportFileSystemState on region 5399b49682ac5742acedeb1201f18c71 2024-12-15T01:36:03,657 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=172, ppid=170, state=RUNNABLE; SnapshotRegionProcedure 5399b49682ac5742acedeb1201f18c71 2024-12-15T01:36:03,658 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=172, resume processing ppid=170 2024-12-15T01:36:03,658 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-15T01:36:03,658 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=172, ppid=170, state=SUCCESS; SnapshotRegionProcedure 5399b49682ac5742acedeb1201f18c71 in 197 msec 2024-12-15T01:36:03,658 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-15T01:36:03,659 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-15T01:36:03,659 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testEmptyExportFileSystemState 2024-12-15T01:36:03,659 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/.tmp/snaptb0-testEmptyExportFileSystemState 2024-12-15T01:36:03,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742198_1374 (size=645) 2024-12-15T01:36:03,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742198_1374 (size=645) 2024-12-15T01:36:03,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742198_1374 (size=645) 2024-12-15T01:36:03,667 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 
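At this point the per-region manifests for snaptb0-testEmptyExportFileSystemState have been consolidated and the verifier step is running; once the procedure completes, the snapshot is usable for clone and restore. A minimal sketch of those two operations, assuming an open Admin handle and placeholder table names (restore requires the target table to be disabled):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    public class UseSnapshot {
      static void cloneAndRestore(Admin admin) throws Exception {
        TableName original = TableName.valueOf("default", "testtb-testEmptyExportFileSystemState");
        // Clone creates a new table whose regions reference the snapshot's hfiles.
        admin.cloneSnapshot("snaptb0-testEmptyExportFileSystemState",
            TableName.valueOf("default", "clone-testEmptyExportFileSystemState"));
        // Restore rewrites an existing, disabled table back to the snapshot's contents.
        admin.disableTable(original);
        admin.restoreSnapshot("snaptb0-testEmptyExportFileSystemState");
        admin.enableTable(original);
      }
    }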
2024-12-15T01:36:03,671 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-15T01:36:03,672 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/.tmp/snaptb0-testEmptyExportFileSystemState to hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/snaptb0-testEmptyExportFileSystemState 2024-12-15T01:36:03,673 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-15T01:36:03,673 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 170 2024-12-15T01:36:03,674 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=170, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } in 229 msec 2024-12-15T01:36:03,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=170 2024-12-15T01:36:03,750 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testEmptyExportFileSystemState, procId: 170 completed 2024-12-15T01:36:03,750 INFO [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226563750 2024-12-15T01:36:03,750 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:33329, tgtDir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226563750, rawTgtDir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226563750, srcFsUri=hdfs://localhost:33329, srcDir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b 2024-12-15T01:36:03,773 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:33329, inputRoot=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b 2024-12-15T01:36:03,773 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-778768832_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226563750, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226563750/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-12-15T01:36:03,774 INFO 
[Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 2024-12-15T01:36:03,778 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState to hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226563750/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-12-15T01:36:03,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742200_1376 (size=567) 2024-12-15T01:36:03,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742199_1375 (size=185) 2024-12-15T01:36:03,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742200_1376 (size=567) 2024-12-15T01:36:03,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742199_1375 (size=185) 2024-12-15T01:36:03,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742200_1376 (size=567) 2024-12-15T01:36:03,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742199_1375 (size=185) 2024-12-15T01:36:03,923 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/hadoop-18003419906893295627.jar 2024-12-15T01:36:03,924 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-15T01:36:03,924 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-15T01:36:03,924 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-15T01:36:04,717 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/hadoop-14989701634722712488.jar 2024-12-15T01:36:04,717 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-15T01:36:04,717 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-15T01:36:04,785 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/hadoop-15935509330828166512.jar 2024-12-15T01:36:04,785 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-15T01:36:04,785 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-15T01:36:04,786 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-15T01:36:04,786 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-15T01:36:04,786 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-15T01:36:04,786 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-15T01:36:04,786 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-15T01:36:04,786 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-15T01:36:04,787 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-15T01:36:04,787 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-15T01:36:04,787 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class 
org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-15T01:36:04,787 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-15T01:36:04,787 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-15T01:36:04,787 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-15T01:36:04,788 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-15T01:36:04,788 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-15T01:36:04,788 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-15T01:36:04,788 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-15T01:36:04,788 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-15T01:36:04,788 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-15T01:36:04,789 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-15T01:36:04,789 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-15T01:36:04,789 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-15T01:36:04,789 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-15T01:36:04,789 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-15T01:36:04,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742201_1377 (size=127628) 2024-12-15T01:36:04,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742201_1377 (size=127628) 2024-12-15T01:36:04,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742201_1377 (size=127628) 2024-12-15T01:36:04,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742202_1378 (size=2172137) 2024-12-15T01:36:04,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742202_1378 (size=2172137) 2024-12-15T01:36:04,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742202_1378 (size=2172137) 2024-12-15T01:36:04,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742203_1379 (size=213228) 2024-12-15T01:36:04,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742203_1379 (size=213228) 2024-12-15T01:36:04,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742203_1379 (size=213228) 2024-12-15T01:36:04,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742204_1380 (size=1877034) 2024-12-15T01:36:04,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742204_1380 (size=1877034) 2024-12-15T01:36:04,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742204_1380 (size=1877034) 2024-12-15T01:36:04,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742205_1381 (size=533455) 2024-12-15T01:36:04,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742205_1381 (size=533455) 2024-12-15T01:36:04,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to 
blk_1073742205_1381 (size=533455) 2024-12-15T01:36:04,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742206_1382 (size=7280644) 2024-12-15T01:36:04,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742206_1382 (size=7280644) 2024-12-15T01:36:04,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742206_1382 (size=7280644) 2024-12-15T01:36:04,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742207_1383 (size=4188619) 2024-12-15T01:36:04,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742207_1383 (size=4188619) 2024-12-15T01:36:04,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742207_1383 (size=4188619) 2024-12-15T01:36:04,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742208_1384 (size=20406) 2024-12-15T01:36:04,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742208_1384 (size=20406) 2024-12-15T01:36:04,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742208_1384 (size=20406) 2024-12-15T01:36:04,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742209_1385 (size=75495) 2024-12-15T01:36:04,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742209_1385 (size=75495) 2024-12-15T01:36:04,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742209_1385 (size=75495) 2024-12-15T01:36:04,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742210_1386 (size=45609) 2024-12-15T01:36:04,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742210_1386 (size=45609) 2024-12-15T01:36:04,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742210_1386 (size=45609) 2024-12-15T01:36:04,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742211_1387 (size=110084) 2024-12-15T01:36:04,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742211_1387 (size=110084) 2024-12-15T01:36:04,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742211_1387 (size=110084) 2024-12-15T01:36:04,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742212_1388 (size=1323991) 2024-12-15T01:36:04,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added 
to blk_1073742212_1388 (size=1323991) 2024-12-15T01:36:04,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742212_1388 (size=1323991) 2024-12-15T01:36:04,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742213_1389 (size=23076) 2024-12-15T01:36:04,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742213_1389 (size=23076) 2024-12-15T01:36:04,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742213_1389 (size=23076) 2024-12-15T01:36:04,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742214_1390 (size=126803) 2024-12-15T01:36:04,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742214_1390 (size=126803) 2024-12-15T01:36:04,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742214_1390 (size=126803) 2024-12-15T01:36:04,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742215_1391 (size=322274) 2024-12-15T01:36:04,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742215_1391 (size=322274) 2024-12-15T01:36:04,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742215_1391 (size=322274) 2024-12-15T01:36:04,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742216_1392 (size=1832290) 2024-12-15T01:36:04,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742216_1392 (size=1832290) 2024-12-15T01:36:04,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742216_1392 (size=1832290) 2024-12-15T01:36:05,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742217_1393 (size=30081) 2024-12-15T01:36:05,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742217_1393 (size=30081) 2024-12-15T01:36:05,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742217_1393 (size=30081) 2024-12-15T01:36:05,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742218_1394 (size=53616) 2024-12-15T01:36:05,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742218_1394 (size=53616) 2024-12-15T01:36:05,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742218_1394 (size=53616) 2024-12-15T01:36:05,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is 
added to blk_1073742219_1395 (size=29229) 2024-12-15T01:36:05,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742219_1395 (size=29229) 2024-12-15T01:36:05,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742219_1395 (size=29229) 2024-12-15T01:36:05,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742220_1396 (size=169089) 2024-12-15T01:36:05,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742220_1396 (size=169089) 2024-12-15T01:36:05,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742220_1396 (size=169089) 2024-12-15T01:36:05,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742221_1397 (size=912094) 2024-12-15T01:36:05,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742221_1397 (size=912094) 2024-12-15T01:36:05,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742221_1397 (size=912094) 2024-12-15T01:36:05,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742222_1398 (size=5175431) 2024-12-15T01:36:05,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742222_1398 (size=5175431) 2024-12-15T01:36:05,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742222_1398 (size=5175431) 2024-12-15T01:36:05,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742223_1399 (size=136454) 2024-12-15T01:36:05,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742223_1399 (size=136454) 2024-12-15T01:36:05,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742223_1399 (size=136454) 2024-12-15T01:36:05,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742224_1400 (size=451756) 2024-12-15T01:36:05,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742224_1400 (size=451756) 2024-12-15T01:36:05,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742224_1400 (size=451756) 2024-12-15T01:36:05,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742225_1401 (size=3317408) 2024-12-15T01:36:05,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742225_1401 (size=3317408) 2024-12-15T01:36:05,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:38745 is added to blk_1073742225_1401 (size=3317408) 2024-12-15T01:36:05,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742226_1402 (size=6350909) 2024-12-15T01:36:05,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742226_1402 (size=6350909) 2024-12-15T01:36:05,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742226_1402 (size=6350909) 2024-12-15T01:36:05,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742227_1403 (size=503880) 2024-12-15T01:36:05,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742227_1403 (size=503880) 2024-12-15T01:36:05,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742227_1403 (size=503880) 2024-12-15T01:36:05,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742228_1404 (size=4695811) 2024-12-15T01:36:05,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742228_1404 (size=4695811) 2024-12-15T01:36:05,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742228_1404 (size=4695811) 2024-12-15T01:36:05,131 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
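The WARN from mapreduce.JobResourceUploader above ("No job jar file set. User classes may not be found.") is emitted when a MapReduce job is submitted without a job jar. As a minimal sketch, not taken from this test harness, this is how a driver would normally set it:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.Job;

    public class JobJarExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            Job job = Job.getInstance(conf, "export-snapshot-example"); // job name is a placeholder
            // Ship the jar containing this class with the job so user classes are found
            // on the cluster side; this is what silences the JobResourceUploader warning.
            job.setJarByClass(JobJarExample.class);
            // Alternatively, name the jar explicitly:
            // job.setJar("/path/to/job.jar");
        }
    }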
2024-12-15T01:36:05,133 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'emptySnaptb0-testEmptyExportFileSystemState' hfile list 2024-12-15T01:36:05,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742229_1405 (size=7) 2024-12-15T01:36:05,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742229_1405 (size=7) 2024-12-15T01:36:05,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742229_1405 (size=7) 2024-12-15T01:36:05,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742230_1406 (size=10) 2024-12-15T01:36:05,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742230_1406 (size=10) 2024-12-15T01:36:05,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742230_1406 (size=10) 2024-12-15T01:36:05,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742231_1407 (size=304840) 2024-12-15T01:36:05,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742231_1407 (size=304840) 2024-12-15T01:36:05,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742231_1407 (size=304840) 2024-12-15T01:36:05,171 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-15T01:36:05,171 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-15T01:36:05,433 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734226430009_0007_000001 (auth:SIMPLE) from 127.0.0.1:37282 2024-12-15T01:36:07,489 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-15T01:36:10,733 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734226430009_0007_000001 (auth:SIMPLE) from 127.0.0.1:37434 2024-12-15T01:36:10,818 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
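The two capacity.AbstractLeafQueue warnings above indicate that yarn.scheduler.capacity.maximum-am-resource-percent is low relative to the queue's resources, so the CapacityScheduler notes it will skip enforcement to let at least one ApplicationMaster start. A hedged sketch of the relevant key on a Hadoop Configuration follows; in a real deployment the value lives in capacity-scheduler.xml, and 0.5 is only an illustrative number, not what this test cluster uses:

    import org.apache.hadoop.conf.Configuration;

    public class AmLimitExample {
        public static void main(String[] args) {
            Configuration conf = new Configuration();
            // Standard CapacityScheduler key; 0.5 here is an arbitrary example value.
            conf.setFloat("yarn.scheduler.capacity.maximum-am-resource-percent", 0.5f);
            System.out.println(conf.get("yarn.scheduler.capacity.maximum-am-resource-percent"));
        }
    }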
2024-12-15T01:36:11,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742232_1408 (size=350490) 2024-12-15T01:36:11,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742232_1408 (size=350490) 2024-12-15T01:36:11,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742232_1408 (size=350490) 2024-12-15T01:36:11,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742233_1409 (size=8568) 2024-12-15T01:36:11,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742233_1409 (size=8568) 2024-12-15T01:36:11,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742233_1409 (size=8568) 2024-12-15T01:36:11,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742234_1410 (size=460) 2024-12-15T01:36:11,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742234_1410 (size=460) 2024-12-15T01:36:11,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742234_1410 (size=460) 2024-12-15T01:36:11,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742235_1411 (size=8568) 2024-12-15T01:36:11,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742235_1411 (size=8568) 2024-12-15T01:36:11,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742235_1411 (size=8568) 2024-12-15T01:36:11,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742236_1412 (size=350490) 2024-12-15T01:36:11,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742236_1412 (size=350490) 2024-12-15T01:36:11,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742236_1412 (size=350490) 2024-12-15T01:36:12,293 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState 2024-12-15T01:36:12,293 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState Metrics about Tables on a single HBase RegionServer 2024-12-15T01:36:13,485 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-15T01:36:13,486 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 
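The snapshot.ExportSnapshot entries above (loading the hfile list, then "Finalize the Snapshot Export" and the verification step) trace the export job the test drives. As a rough sketch only — the snapshot name and destination URI below are placeholders rather than this test's real paths — such an export can be launched through Hadoop's ToolRunner:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // Placeholder snapshot name and target filesystem URI.
            int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
                "--snapshot", "emptySnaptb0-testEmptyExportFileSystemState",
                "--copy-to", "hdfs://namenode:8020/export-test"
            });
            System.exit(rc);
        }
    }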
2024-12-15T01:36:13,499 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: emptySnaptb0-testEmptyExportFileSystemState 2024-12-15T01:36:13,499 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-15T01:36:13,500 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-15T01:36:13,500 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-778768832_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState at hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-12-15T01:36:13,501 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/.snapshotinfo 2024-12-15T01:36:13,501 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/data.manifest 2024-12-15T01:36:13,501 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-778768832_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226563750/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState at hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226563750/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-12-15T01:36:13,501 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226563750/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/.snapshotinfo 2024-12-15T01:36:13,501 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226563750/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/data.manifest 2024-12-15T01:36:13,508 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testEmptyExportFileSystemState 2024-12-15T01:36:13,509 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testEmptyExportFileSystemState 2024-12-15T01:36:13,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] procedure2.ProcedureExecutor(1098): Stored pid=173, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-15T01:36:13,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-15T01:36:13,512 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734226573511"}]},"ts":"1734226573511"} 2024-12-15T01:36:13,513 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated 
tableName=testtb-testEmptyExportFileSystemState, state=DISABLING in hbase:meta 2024-12-15T01:36:13,545 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set testtb-testEmptyExportFileSystemState to state=DISABLING 2024-12-15T01:36:13,545 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=174, ppid=173, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testEmptyExportFileSystemState}] 2024-12-15T01:36:13,546 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=175, ppid=174, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=8019e6b666090168a1635a3c2749d379, UNASSIGN}, {pid=176, ppid=174, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=5399b49682ac5742acedeb1201f18c71, UNASSIGN}] 2024-12-15T01:36:13,548 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=176, ppid=174, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=5399b49682ac5742acedeb1201f18c71, UNASSIGN 2024-12-15T01:36:13,548 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=175, ppid=174, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=8019e6b666090168a1635a3c2749d379, UNASSIGN 2024-12-15T01:36:13,548 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=175 updating hbase:meta row=8019e6b666090168a1635a3c2749d379, regionState=CLOSING, regionLocation=66d575e28c61,42663,1734226422715 2024-12-15T01:36:13,548 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=176 updating hbase:meta row=5399b49682ac5742acedeb1201f18c71, regionState=CLOSING, regionLocation=66d575e28c61,39381,1734226422872 2024-12-15T01:36:13,550 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-15T01:36:13,550 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=177, ppid=176, state=RUNNABLE; CloseRegionProcedure 5399b49682ac5742acedeb1201f18c71, server=66d575e28c61,39381,1734226422872}] 2024-12-15T01:36:13,550 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-15T01:36:13,550 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=178, ppid=175, state=RUNNABLE; CloseRegionProcedure 8019e6b666090168a1635a3c2749d379, server=66d575e28c61,42663,1734226422715}] 2024-12-15T01:36:13,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-15T01:36:13,702 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 66d575e28c61,42663,1734226422715 2024-12-15T01:36:13,702 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 66d575e28c61,39381,1734226422872 2024-12-15T01:36:13,703 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] handler.UnassignRegionHandler(124): Close 8019e6b666090168a1635a3c2749d379 2024-12-15T01:36:13,703 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] 
handler.UnassignRegionHandler(124): Close 5399b49682ac5742acedeb1201f18c71 2024-12-15T01:36:13,703 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-15T01:36:13,703 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-15T01:36:13,703 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1681): Closing 8019e6b666090168a1635a3c2749d379, disabling compactions & flushes 2024-12-15T01:36:13,703 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] regionserver.HRegion(1681): Closing 5399b49682ac5742acedeb1201f18c71, disabling compactions & flushes 2024-12-15T01:36:13,703 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1703): Closing region testtb-testEmptyExportFileSystemState,,1734226562431.8019e6b666090168a1635a3c2749d379. 2024-12-15T01:36:13,703 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testEmptyExportFileSystemState,,1734226562431.8019e6b666090168a1635a3c2749d379. 2024-12-15T01:36:13,703 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] regionserver.HRegion(1703): Closing region testtb-testEmptyExportFileSystemState,1,1734226562431.5399b49682ac5742acedeb1201f18c71. 2024-12-15T01:36:13,703 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1791): Acquired close lock on testtb-testEmptyExportFileSystemState,,1734226562431.8019e6b666090168a1635a3c2749d379. after waiting 0 ms 2024-12-15T01:36:13,703 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1801): Updates disabled for region testtb-testEmptyExportFileSystemState,,1734226562431.8019e6b666090168a1635a3c2749d379. 2024-12-15T01:36:13,703 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testEmptyExportFileSystemState,1,1734226562431.5399b49682ac5742acedeb1201f18c71. 2024-12-15T01:36:13,703 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] regionserver.HRegion(1791): Acquired close lock on testtb-testEmptyExportFileSystemState,1,1734226562431.5399b49682ac5742acedeb1201f18c71. after waiting 0 ms 2024-12-15T01:36:13,704 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] regionserver.HRegion(1801): Updates disabled for region testtb-testEmptyExportFileSystemState,1,1734226562431.5399b49682ac5742acedeb1201f18c71. 
2024-12-15T01:36:13,710 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testEmptyExportFileSystemState/8019e6b666090168a1635a3c2749d379/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-15T01:36:13,710 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testEmptyExportFileSystemState/5399b49682ac5742acedeb1201f18c71/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-15T01:36:13,710 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-15T01:36:13,710 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-15T01:36:13,710 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1922): Closed testtb-testEmptyExportFileSystemState,,1734226562431.8019e6b666090168a1635a3c2749d379. 2024-12-15T01:36:13,710 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1635): Region close journal for 8019e6b666090168a1635a3c2749d379: 2024-12-15T01:36:13,710 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] regionserver.HRegion(1922): Closed testtb-testEmptyExportFileSystemState,1,1734226562431.5399b49682ac5742acedeb1201f18c71. 
2024-12-15T01:36:13,711 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] regionserver.HRegion(1635): Region close journal for 5399b49682ac5742acedeb1201f18c71: 2024-12-15T01:36:13,712 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] handler.UnassignRegionHandler(170): Closed 5399b49682ac5742acedeb1201f18c71 2024-12-15T01:36:13,713 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=176 updating hbase:meta row=5399b49682ac5742acedeb1201f18c71, regionState=CLOSED 2024-12-15T01:36:13,713 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] handler.UnassignRegionHandler(170): Closed 8019e6b666090168a1635a3c2749d379 2024-12-15T01:36:13,713 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=175 updating hbase:meta row=8019e6b666090168a1635a3c2749d379, regionState=CLOSED 2024-12-15T01:36:13,715 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=177, resume processing ppid=176 2024-12-15T01:36:13,715 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=177, ppid=176, state=SUCCESS; CloseRegionProcedure 5399b49682ac5742acedeb1201f18c71, server=66d575e28c61,39381,1734226422872 in 164 msec 2024-12-15T01:36:13,715 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=178, resume processing ppid=175 2024-12-15T01:36:13,716 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=178, ppid=175, state=SUCCESS; CloseRegionProcedure 8019e6b666090168a1635a3c2749d379, server=66d575e28c61,42663,1734226422715 in 164 msec 2024-12-15T01:36:13,716 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=176, ppid=174, state=SUCCESS; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=5399b49682ac5742acedeb1201f18c71, UNASSIGN in 169 msec 2024-12-15T01:36:13,717 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=175, resume processing ppid=174 2024-12-15T01:36:13,717 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=175, ppid=174, state=SUCCESS; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=8019e6b666090168a1635a3c2749d379, UNASSIGN in 170 msec 2024-12-15T01:36:13,718 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=174, resume processing ppid=173 2024-12-15T01:36:13,718 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=174, ppid=173, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testEmptyExportFileSystemState in 172 msec 2024-12-15T01:36:13,719 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734226573719"}]},"ts":"1734226573719"} 2024-12-15T01:36:13,720 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testEmptyExportFileSystemState, state=DISABLED in hbase:meta 2024-12-15T01:36:13,728 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set testtb-testEmptyExportFileSystemState to state=DISABLED 2024-12-15T01:36:13,729 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=173, state=SUCCESS; DisableTableProcedure table=testtb-testEmptyExportFileSystemState in 219 msec 2024-12-15T01:36:13,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to 
see if procedure is done pid=173 2024-12-15T01:36:13,814 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testEmptyExportFileSystemState, procId: 173 completed 2024-12-15T01:36:13,814 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testEmptyExportFileSystemState 2024-12-15T01:36:13,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] procedure2.ProcedureExecutor(1098): Stored pid=179, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-15T01:36:13,816 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=179, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-15T01:36:13,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testEmptyExportFileSystemState 2024-12-15T01:36:13,816 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=179, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-15T01:36:13,817 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35281 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testEmptyExportFileSystemState 2024-12-15T01:36:13,819 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testEmptyExportFileSystemState/5399b49682ac5742acedeb1201f18c71 2024-12-15T01:36:13,819 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testEmptyExportFileSystemState/8019e6b666090168a1635a3c2749d379 2024-12-15T01:36:13,820 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testEmptyExportFileSystemState/8019e6b666090168a1635a3c2749d379/cf, FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testEmptyExportFileSystemState/8019e6b666090168a1635a3c2749d379/recovered.edits] 2024-12-15T01:36:13,820 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testEmptyExportFileSystemState/5399b49682ac5742acedeb1201f18c71/cf, FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testEmptyExportFileSystemState/5399b49682ac5742acedeb1201f18c71/recovered.edits] 2024-12-15T01:36:13,824 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testEmptyExportFileSystemState/5399b49682ac5742acedeb1201f18c71/cf/f2e87c473e88477280e5964e5fc9c539 to hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/archive/data/default/testtb-testEmptyExportFileSystemState/5399b49682ac5742acedeb1201f18c71/cf/f2e87c473e88477280e5964e5fc9c539 
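The sequence above — the DisableTableProcedure finishing, HMaster accepting the delete, then PermissionStorage and HFileArchiver cleaning up — is driven by two client-side calls. A minimal sketch using the standard HBase Admin API (connection settings are assumed; the master-side procedure details are what the log itself records):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTableExample {
        public static void main(String[] args) throws Exception {
            TableName table = TableName.valueOf("testtb-testEmptyExportFileSystemState");
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // A table must be disabled before it can be deleted; each call blocks
                // until the corresponding master procedure (DISABLE / DELETE) completes.
                admin.disableTable(table);
                admin.deleteTable(table);
            }
        }
    }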
2024-12-15T01:36:13,824 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testEmptyExportFileSystemState/8019e6b666090168a1635a3c2749d379/cf/b4405f8543bf49cface0c0da8ce5c0a2 to hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/archive/data/default/testtb-testEmptyExportFileSystemState/8019e6b666090168a1635a3c2749d379/cf/b4405f8543bf49cface0c0da8ce5c0a2 2024-12-15T01:36:13,826 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testEmptyExportFileSystemState/8019e6b666090168a1635a3c2749d379/recovered.edits/9.seqid to hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/archive/data/default/testtb-testEmptyExportFileSystemState/8019e6b666090168a1635a3c2749d379/recovered.edits/9.seqid 2024-12-15T01:36:13,826 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testEmptyExportFileSystemState/5399b49682ac5742acedeb1201f18c71/recovered.edits/9.seqid to hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/archive/data/default/testtb-testEmptyExportFileSystemState/5399b49682ac5742acedeb1201f18c71/recovered.edits/9.seqid 2024-12-15T01:36:13,826 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testEmptyExportFileSystemState/5399b49682ac5742acedeb1201f18c71 2024-12-15T01:36:13,826 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testEmptyExportFileSystemState/8019e6b666090168a1635a3c2749d379 2024-12-15T01:36:13,826 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testtb-testEmptyExportFileSystemState regions 2024-12-15T01:36:13,828 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=179, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-15T01:36:13,831 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testEmptyExportFileSystemState from hbase:meta 2024-12-15T01:36:13,832 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testEmptyExportFileSystemState' descriptor. 2024-12-15T01:36:13,834 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=179, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-15T01:36:13,834 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testEmptyExportFileSystemState' from region states. 
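The HFileArchiver entries above show the on-disk layout used during cleanup: store files and recovered.edits are moved from data/<namespace>/<table>/<region>/... to the same relative path under archive/. A small illustration of that mapping, using one of the paths from the log — this is plain string handling for illustration, not HBase's own path-resolution code:

    public class ArchivePathExample {
        // Maps a region file path under data/ to its location under archive/,
        // mirroring the moves logged by HFileArchiver above. Illustrative only.
        static String toArchivePath(String rootDir, String dataPath) {
            String relative = dataPath.substring((rootDir + "/data/").length());
            return rootDir + "/archive/data/" + relative;
        }

        public static void main(String[] args) {
            String root = "hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b";
            String dataFile = root + "/data/default/testtb-testEmptyExportFileSystemState/"
                + "8019e6b666090168a1635a3c2749d379/cf/b4405f8543bf49cface0c0da8ce5c0a2";
            // Prints the archive location seen in the corresponding log entry.
            System.out.println(toArchivePath(root, dataFile));
        }
    }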
2024-12-15T01:36:13,834 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState,,1734226562431.8019e6b666090168a1635a3c2749d379.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734226573834"}]},"ts":"9223372036854775807"} 2024-12-15T01:36:13,834 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState,1,1734226562431.5399b49682ac5742acedeb1201f18c71.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734226573834"}]},"ts":"9223372036854775807"} 2024-12-15T01:36:13,836 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-15T01:36:13,836 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 8019e6b666090168a1635a3c2749d379, NAME => 'testtb-testEmptyExportFileSystemState,,1734226562431.8019e6b666090168a1635a3c2749d379.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 5399b49682ac5742acedeb1201f18c71, NAME => 'testtb-testEmptyExportFileSystemState,1,1734226562431.5399b49682ac5742acedeb1201f18c71.', STARTKEY => '1', ENDKEY => ''}] 2024-12-15T01:36:13,836 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testEmptyExportFileSystemState' as deleted. 2024-12-15T01:36:13,836 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734226573836"}]},"ts":"9223372036854775807"} 2024-12-15T01:36:13,836 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42663-0x1002752410c0001, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-15T01:36:13,836 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39381-0x1002752410c0003, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-15T01:36:13,836 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-15T01:36:13,836 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35281-0x1002752410c0002, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-15T01:36:13,837 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-12-15T01:36:13,837 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-12-15T01:36:13,837 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-12-15T01:36:13,837 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-12-15T01:36:13,838 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1678): 
Deleted table testtb-testEmptyExportFileSystemState state from META 2024-12-15T01:36:13,844 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35281-0x1002752410c0002, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-15T01:36:13,844 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39381-0x1002752410c0003, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-15T01:36:13,844 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42663-0x1002752410c0001, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-15T01:36:13,844 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42663-0x1002752410c0001, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:36:13,844 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35281-0x1002752410c0002, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:36:13,844 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39381-0x1002752410c0003, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:36:13,845 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-15T01:36:13,845 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:36:13,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-12-15T01:36:13,853 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-15T01:36:13,853 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-15T01:36:13,853 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-15T01:36:13,853 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=179, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-15T01:36:13,853 DEBUG [zk-permission-watcher-pool-0 {}] 
access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-15T01:36:13,854 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=179, state=SUCCESS; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState in 39 msec 2024-12-15T01:36:13,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-12-15T01:36:13,948 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testEmptyExportFileSystemState, procId: 179 completed 2024-12-15T01:36:13,960 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testEmptyExportFileSystemState" 2024-12-15T01:36:13,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testEmptyExportFileSystemState 2024-12-15T01:36:13,965 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testEmptyExportFileSystemState" 2024-12-15T01:36:13,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testEmptyExportFileSystemState 2024-12-15T01:36:13,986 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testEmptyExportFileSystemState Thread=811 (was 798) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-778768832_22 at /127.0.0.1:35136 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-5554 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) 
java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41555 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (309020234) connection to localhost/127.0.0.1:41769 from appattempt_1734226430009_0007_000001 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1744947919_1 at /127.0.0.1:48020 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x613351a8-shared-pool-42 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x613351a8-shared-pool-43 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (309020234) connection to localhost/127.0.0.1:41555 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-778768832_22 at /127.0.0.1:45302 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 70100) 
java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-778768832_22 at /127.0.0.1:48052 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1744947919_1 at /127.0.0.1:45274 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x613351a8-shared-pool-40 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (309020234) connection to localhost/127.0.0.1:40739 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: hconnection-0x613351a8-shared-pool-41 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=821 (was 795) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=417 (was 458), ProcessCount=17 (was 11) - ProcessCount LEAK? -, AvailableMemoryMB=3263 (was 4168) 2024-12-15T01:36:13,986 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=811 is superior to 500 2024-12-15T01:36:14,004 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportWithChecksum Thread=811, OpenFileDescriptor=821, MaxFileDescriptor=1048576, SystemLoadAverage=417, ProcessCount=17, AvailableMemoryMB=3262 2024-12-15T01:36:14,004 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=811 is superior to 500 2024-12-15T01:36:14,006 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-15T01:36:14,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] procedure2.ProcedureExecutor(1098): Stored pid=180, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportWithChecksum 2024-12-15T01:36:14,007 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_PRE_OPERATION 2024-12-15T01:36:14,007 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T01:36:14,007 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(713): 
Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithChecksum" procId is: 180 2024-12-15T01:36:14,008 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-15T01:36:14,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=180 2024-12-15T01:36:14,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742237_1413 (size=404) 2024-12-15T01:36:14,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742237_1413 (size=404) 2024-12-15T01:36:14,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742237_1413 (size=404) 2024-12-15T01:36:14,017 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 94dccfb6b7c1fa94d4218f6541830037, NAME => 'testtb-testExportWithChecksum,,1734226574005.94dccfb6b7c1fa94d4218f6541830037.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b 2024-12-15T01:36:14,017 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => 94ef698a505d8085753d97885d444f16, NAME => 'testtb-testExportWithChecksum,1,1734226574005.94ef698a505d8085753d97885d444f16.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b 2024-12-15T01:36:14,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742238_1414 (size=65) 2024-12-15T01:36:14,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742238_1414 (size=65) 2024-12-15T01:36:14,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742239_1415 (size=65) 2024-12-15T01:36:14,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742239_1415 (size=65) 2024-12-15T01:36:14,023 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742238_1414 (size=65) 2024-12-15T01:36:14,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742239_1415 (size=65) 2024-12-15T01:36:14,024 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportWithChecksum,,1734226574005.94dccfb6b7c1fa94d4218f6541830037.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T01:36:14,024 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1681): Closing 94dccfb6b7c1fa94d4218f6541830037, disabling compactions & flushes 2024-12-15T01:36:14,024 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportWithChecksum,,1734226574005.94dccfb6b7c1fa94d4218f6541830037. 2024-12-15T01:36:14,024 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithChecksum,,1734226574005.94dccfb6b7c1fa94d4218f6541830037. 2024-12-15T01:36:14,024 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithChecksum,,1734226574005.94dccfb6b7c1fa94d4218f6541830037. after waiting 0 ms 2024-12-15T01:36:14,024 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithChecksum,,1734226574005.94dccfb6b7c1fa94d4218f6541830037. 2024-12-15T01:36:14,024 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportWithChecksum,,1734226574005.94dccfb6b7c1fa94d4218f6541830037. 2024-12-15T01:36:14,024 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1635): Region close journal for 94dccfb6b7c1fa94d4218f6541830037: 2024-12-15T01:36:14,029 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportWithChecksum,1,1734226574005.94ef698a505d8085753d97885d444f16.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T01:36:14,029 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1681): Closing 94ef698a505d8085753d97885d444f16, disabling compactions & flushes 2024-12-15T01:36:14,029 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportWithChecksum,1,1734226574005.94ef698a505d8085753d97885d444f16. 2024-12-15T01:36:14,029 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithChecksum,1,1734226574005.94ef698a505d8085753d97885d444f16. 2024-12-15T01:36:14,029 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithChecksum,1,1734226574005.94ef698a505d8085753d97885d444f16. 
after waiting 0 ms 2024-12-15T01:36:14,029 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithChecksum,1,1734226574005.94ef698a505d8085753d97885d444f16. 2024-12-15T01:36:14,029 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportWithChecksum,1,1734226574005.94ef698a505d8085753d97885d444f16. 2024-12-15T01:36:14,029 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1635): Region close journal for 94ef698a505d8085753d97885d444f16: 2024-12-15T01:36:14,031 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_ADD_TO_META 2024-12-15T01:36:14,031 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportWithChecksum,,1734226574005.94dccfb6b7c1fa94d4218f6541830037.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1734226574031"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734226574031"}]},"ts":"1734226574031"} 2024-12-15T01:36:14,031 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportWithChecksum,1,1734226574005.94ef698a505d8085753d97885d444f16.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1734226574031"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734226574031"}]},"ts":"1734226574031"} 2024-12-15T01:36:14,033 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-15T01:36:14,034 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-15T01:36:14,034 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734226574034"}]},"ts":"1734226574034"} 2024-12-15T01:36:14,035 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithChecksum, state=ENABLING in hbase:meta 2024-12-15T01:36:14,053 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(202): Hosts are {66d575e28c61=0} racks are {/default-rack=0} 2024-12-15T01:36:14,054 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-15T01:36:14,054 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-15T01:36:14,054 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-15T01:36:14,054 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-15T01:36:14,054 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-15T01:36:14,054 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-15T01:36:14,054 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-15T01:36:14,055 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=94dccfb6b7c1fa94d4218f6541830037, 
ASSIGN}, {pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=94ef698a505d8085753d97885d444f16, ASSIGN}] 2024-12-15T01:36:14,056 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=94dccfb6b7c1fa94d4218f6541830037, ASSIGN 2024-12-15T01:36:14,056 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=94ef698a505d8085753d97885d444f16, ASSIGN 2024-12-15T01:36:14,056 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=94dccfb6b7c1fa94d4218f6541830037, ASSIGN; state=OFFLINE, location=66d575e28c61,39381,1734226422872; forceNewPlan=false, retain=false 2024-12-15T01:36:14,057 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=94ef698a505d8085753d97885d444f16, ASSIGN; state=OFFLINE, location=66d575e28c61,35281,1734226422799; forceNewPlan=false, retain=false 2024-12-15T01:36:14,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=180 2024-12-15T01:36:14,207 INFO [66d575e28c61:40789 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-15T01:36:14,207 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=182 updating hbase:meta row=94ef698a505d8085753d97885d444f16, regionState=OPENING, regionLocation=66d575e28c61,35281,1734226422799 2024-12-15T01:36:14,207 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=181 updating hbase:meta row=94dccfb6b7c1fa94d4218f6541830037, regionState=OPENING, regionLocation=66d575e28c61,39381,1734226422872 2024-12-15T01:36:14,208 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=183, ppid=182, state=RUNNABLE; OpenRegionProcedure 94ef698a505d8085753d97885d444f16, server=66d575e28c61,35281,1734226422799}] 2024-12-15T01:36:14,208 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=184, ppid=181, state=RUNNABLE; OpenRegionProcedure 94dccfb6b7c1fa94d4218f6541830037, server=66d575e28c61,39381,1734226422872}] 2024-12-15T01:36:14,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=180 2024-12-15T01:36:14,361 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 66d575e28c61,35281,1734226422799 2024-12-15T01:36:14,361 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 66d575e28c61,39381,1734226422872 2024-12-15T01:36:14,364 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] handler.AssignRegionHandler(135): Open testtb-testExportWithChecksum,,1734226574005.94dccfb6b7c1fa94d4218f6541830037. 
2024-12-15T01:36:14,365 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7285): Opening region: {ENCODED => 94dccfb6b7c1fa94d4218f6541830037, NAME => 'testtb-testExportWithChecksum,,1734226574005.94dccfb6b7c1fa94d4218f6541830037.', STARTKEY => '', ENDKEY => '1'} 2024-12-15T01:36:14,365 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] handler.AssignRegionHandler(135): Open testtb-testExportWithChecksum,1,1734226574005.94ef698a505d8085753d97885d444f16. 2024-12-15T01:36:14,365 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportWithChecksum,,1734226574005.94dccfb6b7c1fa94d4218f6541830037. service=AccessControlService 2024-12-15T01:36:14,365 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7285): Opening region: {ENCODED => 94ef698a505d8085753d97885d444f16, NAME => 'testtb-testExportWithChecksum,1,1734226574005.94ef698a505d8085753d97885d444f16.', STARTKEY => '1', ENDKEY => ''} 2024-12-15T01:36:14,365 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-15T01:36:14,365 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportWithChecksum,1,1734226574005.94ef698a505d8085753d97885d444f16. service=AccessControlService 2024-12-15T01:36:14,365 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum 94dccfb6b7c1fa94d4218f6541830037 2024-12-15T01:36:14,365 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(894): Instantiated testtb-testExportWithChecksum,,1734226574005.94dccfb6b7c1fa94d4218f6541830037.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T01:36:14,365 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
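[editor's note] The "Registered coprocessor service ... service=AccessControlService" and "System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded" records above come from cluster-wide security configuration, not from the table descriptor. A minimal sketch of the relevant settings, expressed against a Configuration object (the same keys would normally live in hbase-site.xml); the key names are the standard HBase security keys, the surrounding class and method are illustrative only:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class AccessControlConfSketch {
  public static Configuration securityConf() {
    Configuration conf = HBaseConfiguration.create();
    // Enable authorization and register the AccessController coprocessor;
    // with this in place every region open logs
    // "Registered coprocessor service ... service=AccessControlService".
    conf.setBoolean("hbase.security.authorization", true);
    conf.set("hbase.coprocessor.master.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");
    conf.set("hbase.coprocessor.region.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");
    conf.set("hbase.coprocessor.regionserver.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");
    return conf;
  }
}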
2024-12-15T01:36:14,365 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7327): checking encryption for 94dccfb6b7c1fa94d4218f6541830037 2024-12-15T01:36:14,366 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7330): checking classloading for 94dccfb6b7c1fa94d4218f6541830037 2024-12-15T01:36:14,366 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum 94ef698a505d8085753d97885d444f16 2024-12-15T01:36:14,366 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(894): Instantiated testtb-testExportWithChecksum,1,1734226574005.94ef698a505d8085753d97885d444f16.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T01:36:14,366 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7327): checking encryption for 94ef698a505d8085753d97885d444f16 2024-12-15T01:36:14,366 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7330): checking classloading for 94ef698a505d8085753d97885d444f16 2024-12-15T01:36:14,375 INFO [StoreOpener-94ef698a505d8085753d97885d444f16-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 94ef698a505d8085753d97885d444f16 2024-12-15T01:36:14,375 INFO [StoreOpener-94dccfb6b7c1fa94d4218f6541830037-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 94dccfb6b7c1fa94d4218f6541830037 2024-12-15T01:36:14,376 INFO [StoreOpener-94ef698a505d8085753d97885d444f16-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 94ef698a505d8085753d97885d444f16 columnFamilyName cf 2024-12-15T01:36:14,376 INFO [StoreOpener-94dccfb6b7c1fa94d4218f6541830037-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, 
single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 94dccfb6b7c1fa94d4218f6541830037 columnFamilyName cf 2024-12-15T01:36:14,376 DEBUG [StoreOpener-94ef698a505d8085753d97885d444f16-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T01:36:14,376 DEBUG [StoreOpener-94dccfb6b7c1fa94d4218f6541830037-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T01:36:14,376 INFO [StoreOpener-94ef698a505d8085753d97885d444f16-1 {}] regionserver.HStore(327): Store=94ef698a505d8085753d97885d444f16/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-15T01:36:14,376 INFO [StoreOpener-94dccfb6b7c1fa94d4218f6541830037-1 {}] regionserver.HStore(327): Store=94dccfb6b7c1fa94d4218f6541830037/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-15T01:36:14,377 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithChecksum/94ef698a505d8085753d97885d444f16 2024-12-15T01:36:14,377 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithChecksum/94dccfb6b7c1fa94d4218f6541830037 2024-12-15T01:36:14,377 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithChecksum/94ef698a505d8085753d97885d444f16 2024-12-15T01:36:14,377 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithChecksum/94dccfb6b7c1fa94d4218f6541830037 2024-12-15T01:36:14,379 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1085): writing seq id for 94ef698a505d8085753d97885d444f16 2024-12-15T01:36:14,379 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1085): writing seq id for 94dccfb6b7c1fa94d4218f6541830037 2024-12-15T01:36:14,380 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithChecksum/94ef698a505d8085753d97885d444f16/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-15T01:36:14,380 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] wal.WALSplitUtil(409): Wrote 
file=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithChecksum/94dccfb6b7c1fa94d4218f6541830037/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-15T01:36:14,381 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1102): Opened 94dccfb6b7c1fa94d4218f6541830037; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72930283, jitterRate=0.08674590289592743}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-15T01:36:14,381 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1102): Opened 94ef698a505d8085753d97885d444f16; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66793997, jitterRate=-0.00469188392162323}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-15T01:36:14,381 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1001): Region open journal for 94dccfb6b7c1fa94d4218f6541830037: 2024-12-15T01:36:14,381 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1001): Region open journal for 94ef698a505d8085753d97885d444f16: 2024-12-15T01:36:14,382 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportWithChecksum,,1734226574005.94dccfb6b7c1fa94d4218f6541830037., pid=184, masterSystemTime=1734226574361 2024-12-15T01:36:14,382 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportWithChecksum,1,1734226574005.94ef698a505d8085753d97885d444f16., pid=183, masterSystemTime=1734226574361 2024-12-15T01:36:14,383 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportWithChecksum,,1734226574005.94dccfb6b7c1fa94d4218f6541830037. 2024-12-15T01:36:14,383 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] handler.AssignRegionHandler(164): Opened testtb-testExportWithChecksum,,1734226574005.94dccfb6b7c1fa94d4218f6541830037. 2024-12-15T01:36:14,384 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=181 updating hbase:meta row=94dccfb6b7c1fa94d4218f6541830037, regionState=OPEN, openSeqNum=2, regionLocation=66d575e28c61,39381,1734226422872 2024-12-15T01:36:14,384 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportWithChecksum,1,1734226574005.94ef698a505d8085753d97885d444f16. 2024-12-15T01:36:14,384 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] handler.AssignRegionHandler(164): Opened testtb-testExportWithChecksum,1,1734226574005.94ef698a505d8085753d97885d444f16. 
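[editor's note] The CreateTableProcedure trace above (pid=180 and its TransitRegionStateProcedure/OpenRegionProcedure children) is the server-side half of an ordinary client call. A minimal client-side sketch, assuming the standard HBase 2.x Admin API and a reachable cluster; the table name, the family cf, and the single split key '1' (which yields the two regions ['', '1') and ['1', '') seen in the log) are taken from the log, while the class name, connection setup, and variable names are illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateChecksumTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportWithChecksum");
      // One column family 'cf' with a single version, matching the descriptor logged above.
      TableDescriptorBuilder tdb = TableDescriptorBuilder.newBuilder(table)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)
              .build());
      // A single split key '1' produces the two regions assigned in the log.
      byte[][] splits = new byte[][] { Bytes.toBytes("1") };
      admin.createTable(tdb.build(), splits);
    }
  }
}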
2024-12-15T01:36:14,385 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=182 updating hbase:meta row=94ef698a505d8085753d97885d444f16, regionState=OPEN, openSeqNum=2, regionLocation=66d575e28c61,35281,1734226422799 2024-12-15T01:36:14,391 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=184, resume processing ppid=181 2024-12-15T01:36:14,392 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=183, resume processing ppid=182 2024-12-15T01:36:14,392 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=184, ppid=181, state=SUCCESS; OpenRegionProcedure 94dccfb6b7c1fa94d4218f6541830037, server=66d575e28c61,39381,1734226422872 in 179 msec 2024-12-15T01:36:14,392 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=183, ppid=182, state=SUCCESS; OpenRegionProcedure 94ef698a505d8085753d97885d444f16, server=66d575e28c61,35281,1734226422799 in 180 msec 2024-12-15T01:36:14,393 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=181, ppid=180, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=94dccfb6b7c1fa94d4218f6541830037, ASSIGN in 336 msec 2024-12-15T01:36:14,393 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=182, resume processing ppid=180 2024-12-15T01:36:14,393 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=182, ppid=180, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=94ef698a505d8085753d97885d444f16, ASSIGN in 337 msec 2024-12-15T01:36:14,397 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-15T01:36:14,397 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734226574397"}]},"ts":"1734226574397"} 2024-12-15T01:36:14,398 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithChecksum, state=ENABLED in hbase:meta 2024-12-15T01:36:14,437 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_POST_OPERATION 2024-12-15T01:36:14,437 DEBUG [PEWorker-5 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportWithChecksum jenkins: RWXCA 2024-12-15T01:36:14,440 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35281 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-12-15T01:36:14,445 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42663-0x1002752410c0001, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:36:14,445 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39381-0x1002752410c0003, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:36:14,445 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35281-0x1002752410c0002, quorum=127.0.0.1:64248, baseZNode=/hbase Received 
ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:36:14,445 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:36:14,454 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-15T01:36:14,454 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-15T01:36:14,454 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-15T01:36:14,454 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-15T01:36:14,454 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-15T01:36:14,454 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-15T01:36:14,455 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-15T01:36:14,455 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-15T01:36:14,455 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=180, state=SUCCESS; CreateTableProcedure table=testtb-testExportWithChecksum in 447 msec 2024-12-15T01:36:14,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=180 2024-12-15T01:36:14,612 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportWithChecksum, procId: 180 completed 2024-12-15T01:36:14,612 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testExportWithChecksum get assigned. 
Timeout = 60000ms 2024-12-15T01:36:14,612 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-15T01:36:14,617 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testExportWithChecksum assigned to meta. Checking AM states. 2024-12-15T01:36:14,617 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-15T01:36:14,617 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testExportWithChecksum assigned. 2024-12-15T01:36:14,620 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-15T01:36:14,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734226574620 (current time:1734226574620). 2024-12-15T01:36:14,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-15T01:36:14,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testExportWithChecksum VERSION not specified, setting to 2 2024-12-15T01:36:14,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-15T01:36:14,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x59b4c089 to 127.0.0.1:64248 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@a2f7740 2024-12-15T01:36:14,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7b95cf69, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T01:36:14,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T01:36:14,631 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46708, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T01:36:14,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x59b4c089 to 127.0.0.1:64248 2024-12-15T01:36:14,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T01:36:14,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4c3e8a0a to 127.0.0.1:64248 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3d5a61ba 2024-12-15T01:36:14,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@68e11fff, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T01:36:14,648 DEBUG [hconnection-0x560d4c8a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T01:36:14,649 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46722, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T01:36:14,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T01:36:14,653 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55928, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T01:36:14,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4c3e8a0a to 127.0.0.1:64248 2024-12-15T01:36:14,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T01:36:14,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-12-15T01:36:14,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-15T01:36:14,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] procedure2.ProcedureExecutor(1098): Stored pid=185, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-15T01:36:14,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 185 2024-12-15T01:36:14,658 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-15T01:36:14,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185 2024-12-15T01:36:14,659 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-15T01:36:14,662 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-15T01:36:14,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:46345 is added to blk_1073742240_1416 (size=161) 2024-12-15T01:36:14,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742240_1416 (size=161) 2024-12-15T01:36:14,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742240_1416 (size=161) 2024-12-15T01:36:14,667 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-15T01:36:14,668 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=186, ppid=185, state=RUNNABLE; SnapshotRegionProcedure 94dccfb6b7c1fa94d4218f6541830037}, {pid=187, ppid=185, state=RUNNABLE; SnapshotRegionProcedure 94ef698a505d8085753d97885d444f16}] 2024-12-15T01:36:14,668 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=187, ppid=185, state=RUNNABLE; SnapshotRegionProcedure 94ef698a505d8085753d97885d444f16 2024-12-15T01:36:14,668 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=186, ppid=185, state=RUNNABLE; SnapshotRegionProcedure 94dccfb6b7c1fa94d4218f6541830037 2024-12-15T01:36:14,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185 2024-12-15T01:36:14,819 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 66d575e28c61,35281,1734226422799 2024-12-15T01:36:14,819 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 66d575e28c61,39381,1734226422872 2024-12-15T01:36:14,820 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35281 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=187 2024-12-15T01:36:14,820 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39381 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=186 2024-12-15T01:36:14,820 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,1,1734226574005.94ef698a505d8085753d97885d444f16. 2024-12-15T01:36:14,821 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,,1734226574005.94dccfb6b7c1fa94d4218f6541830037. 
2024-12-15T01:36:14,821 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HRegion(2538): Flush status journal for 94dccfb6b7c1fa94d4218f6541830037: 2024-12-15T01:36:14,821 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HRegion(2538): Flush status journal for 94ef698a505d8085753d97885d444f16: 2024-12-15T01:36:14,821 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,1,1734226574005.94ef698a505d8085753d97885d444f16. for emptySnaptb0-testExportWithChecksum completed. 2024-12-15T01:36:14,821 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,,1734226574005.94dccfb6b7c1fa94d4218f6541830037. for emptySnaptb0-testExportWithChecksum completed. 2024-12-15T01:36:14,822 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,,1734226574005.94dccfb6b7c1fa94d4218f6541830037.' region-info for snapshot=emptySnaptb0-testExportWithChecksum 2024-12-15T01:36:14,822 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,1,1734226574005.94ef698a505d8085753d97885d444f16.' region-info for snapshot=emptySnaptb0-testExportWithChecksum 2024-12-15T01:36:14,822 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-15T01:36:14,822 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-15T01:36:14,822 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-15T01:36:14,822 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-15T01:36:14,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742241_1417 (size=68) 2024-12-15T01:36:14,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742241_1417 (size=68) 2024-12-15T01:36:14,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742241_1417 (size=68) 2024-12-15T01:36:14,834 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,,1734226574005.94dccfb6b7c1fa94d4218f6541830037. 
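[editor's note] The SNAPSHOT_* state transitions for pid=185 and the two SnapshotRegionProcedure children above correspond to one client request. A minimal sketch of issuing such a FLUSH-type snapshot, assuming the same HBase 2.x Admin API; the snapshot and table names come from the log, the class and method wrapper are illustrative:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeFlushSnapshot {
  // Blocks until the master-side SnapshotProcedure finishes,
  // matching the "Operation: SNAPSHOT ... procId: 185 completed" record in the log.
  static void takeEmptySnapshot(Admin admin) throws Exception {
    admin.snapshot("emptySnaptb0-testExportWithChecksum",
        TableName.valueOf("testtb-testExportWithChecksum"),
        SnapshotType.FLUSH);
  }
}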
2024-12-15T01:36:14,834 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=186 2024-12-15T01:36:14,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.HMaster(4106): Remote procedure done, pid=186 2024-12-15T01:36:14,834 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithChecksum on region 94dccfb6b7c1fa94d4218f6541830037 2024-12-15T01:36:14,834 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=186, ppid=185, state=RUNNABLE; SnapshotRegionProcedure 94dccfb6b7c1fa94d4218f6541830037 2024-12-15T01:36:14,836 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=186, ppid=185, state=SUCCESS; SnapshotRegionProcedure 94dccfb6b7c1fa94d4218f6541830037 in 168 msec 2024-12-15T01:36:14,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742242_1418 (size=68) 2024-12-15T01:36:14,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742242_1418 (size=68) 2024-12-15T01:36:14,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742242_1418 (size=68) 2024-12-15T01:36:14,841 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,1,1734226574005.94ef698a505d8085753d97885d444f16. 2024-12-15T01:36:14,841 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=187 2024-12-15T01:36:14,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.HMaster(4106): Remote procedure done, pid=187 2024-12-15T01:36:14,842 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithChecksum on region 94ef698a505d8085753d97885d444f16 2024-12-15T01:36:14,842 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=187, ppid=185, state=RUNNABLE; SnapshotRegionProcedure 94ef698a505d8085753d97885d444f16 2024-12-15T01:36:14,843 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=187, resume processing ppid=185 2024-12-15T01:36:14,843 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-15T01:36:14,843 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=187, ppid=185, state=SUCCESS; SnapshotRegionProcedure 94ef698a505d8085753d97885d444f16 in 175 msec 2024-12-15T01:36:14,844 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum 
table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-15T01:36:14,844 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-15T01:36:14,844 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithChecksum 2024-12-15T01:36:14,845 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithChecksum 2024-12-15T01:36:14,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742243_1419 (size=543) 2024-12-15T01:36:14,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742243_1419 (size=543) 2024-12-15T01:36:14,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742243_1419 (size=543) 2024-12-15T01:36:14,858 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-15T01:36:14,862 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-15T01:36:14,862 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithChecksum to hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/emptySnaptb0-testExportWithChecksum 2024-12-15T01:36:14,863 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-15T01:36:14,863 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 185 2024-12-15T01:36:14,864 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=185, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } in 207 msec 2024-12-15T01:36:14,886 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 
'testtb-testExportWithChecksum' 2024-12-15T01:36:14,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185 2024-12-15T01:36:14,961 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithChecksum, procId: 185 completed 2024-12-15T01:36:14,967 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] regionserver.HRegion(8254): writing data to region testtb-testExportWithChecksum,,1734226574005.94dccfb6b7c1fa94d4218f6541830037. with WAL disabled. Data may be lost in the event of a crash. 2024-12-15T01:36:14,967 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35281 {}] regionserver.HRegion(8254): writing data to region testtb-testExportWithChecksum,1,1734226574005.94ef698a505d8085753d97885d444f16. with WAL disabled. Data may be lost in the event of a crash. 2024-12-15T01:36:14,970 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportWithChecksum 2024-12-15T01:36:14,970 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportWithChecksum,,1734226574005.94dccfb6b7c1fa94d4218f6541830037. 2024-12-15T01:36:14,970 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-15T01:36:14,981 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-15T01:36:14,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734226574981 (current time:1734226574981). 
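[editor's note] The two "writing data to region ... with WAL disabled" warnings above are what a region server logs when a client submits puts with write-ahead-log durability turned off, as the test's load step does here. A hedged sketch of such a write, assuming the standard client API; the row key and cf:q column are taken from the flush trace later in the log, the cell value and the class/method names are made up for illustration:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalLoad {
  static void loadRow(Connection conn) throws Exception {
    try (Table table = conn.getTable(TableName.valueOf("testtb-testExportWithChecksum"))) {
      Put put = new Put(Bytes.toBytes("0670ecc8a821ce7827895700cad30db1"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      // Skipping the write-ahead log is what triggers the
      // "Data may be lost in the event of a crash" warning above.
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);
    }
  }
}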
2024-12-15T01:36:14,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-15T01:36:14,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportWithChecksum VERSION not specified, setting to 2 2024-12-15T01:36:14,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-15T01:36:14,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x601ce04f to 127.0.0.1:64248 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5c4999da 2024-12-15T01:36:15,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6d37f7e2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T01:36:15,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T01:36:15,023 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46726, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T01:36:15,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x601ce04f to 127.0.0.1:64248 2024-12-15T01:36:15,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T01:36:15,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7a969a28 to 127.0.0.1:64248 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6a015be9 2024-12-15T01:36:15,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@385b39ac, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T01:36:15,039 DEBUG [hconnection-0x3dcbc80a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T01:36:15,040 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46738, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T01:36:15,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T01:36:15,042 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55938, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T01:36:15,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 
0x7a969a28 to 127.0.0.1:64248 2024-12-15T01:36:15,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T01:36:15,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-12-15T01:36:15,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-15T01:36:15,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] procedure2.ProcedureExecutor(1098): Stored pid=188, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-15T01:36:15,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 188 2024-12-15T01:36:15,046 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-15T01:36:15,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=188 2024-12-15T01:36:15,046 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-15T01:36:15,049 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-15T01:36:15,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742244_1420 (size=156) 2024-12-15T01:36:15,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742244_1420 (size=156) 2024-12-15T01:36:15,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742244_1420 (size=156) 2024-12-15T01:36:15,056 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-15T01:36:15,057 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=189, ppid=188, state=RUNNABLE; SnapshotRegionProcedure 94dccfb6b7c1fa94d4218f6541830037}, {pid=190, ppid=188, state=RUNNABLE; SnapshotRegionProcedure 94ef698a505d8085753d97885d444f16}] 2024-12-15T01:36:15,057 
INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=190, ppid=188, state=RUNNABLE; SnapshotRegionProcedure 94ef698a505d8085753d97885d444f16 2024-12-15T01:36:15,057 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=189, ppid=188, state=RUNNABLE; SnapshotRegionProcedure 94dccfb6b7c1fa94d4218f6541830037 2024-12-15T01:36:15,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=188 2024-12-15T01:36:15,208 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 66d575e28c61,39381,1734226422872 2024-12-15T01:36:15,208 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 66d575e28c61,35281,1734226422799 2024-12-15T01:36:15,209 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35281 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=190 2024-12-15T01:36:15,209 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39381 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=189 2024-12-15T01:36:15,209 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,,1734226574005.94dccfb6b7c1fa94d4218f6541830037. 2024-12-15T01:36:15,209 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,1,1734226574005.94ef698a505d8085753d97885d444f16. 
2024-12-15T01:36:15,209 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.HRegion(2837): Flushing 94dccfb6b7c1fa94d4218f6541830037 1/1 column families, dataSize=266 B heapSize=832 B 2024-12-15T01:36:15,209 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.HRegion(2837): Flushing 94ef698a505d8085753d97885d444f16 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-12-15T01:36:15,227 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithChecksum/94dccfb6b7c1fa94d4218f6541830037/.tmp/cf/aea8bb6875fd418bb8fafd0336911f8d is 71, key is 0670ecc8a821ce7827895700cad30db1/cf:q/1734226574967/Put/seqid=0 2024-12-15T01:36:15,227 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithChecksum/94ef698a505d8085753d97885d444f16/.tmp/cf/9b1e0c980703471d9ffd156aff4a0114 is 71, key is 1249c4231c51d7b87a5ad004ba651ef8/cf:q/1734226574967/Put/seqid=0 2024-12-15T01:36:15,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742245_1421 (size=8256) 2024-12-15T01:36:15,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742245_1421 (size=8256) 2024-12-15T01:36:15,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742245_1421 (size=8256) 2024-12-15T01:36:15,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742246_1422 (size=5354) 2024-12-15T01:36:15,236 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.00 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithChecksum/94ef698a505d8085753d97885d444f16/.tmp/cf/9b1e0c980703471d9ffd156aff4a0114 2024-12-15T01:36:15,237 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=266 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithChecksum/94dccfb6b7c1fa94d4218f6541830037/.tmp/cf/aea8bb6875fd418bb8fafd0336911f8d 2024-12-15T01:36:15,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742246_1422 (size=5354) 2024-12-15T01:36:15,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742246_1422 (size=5354) 2024-12-15T01:36:15,241 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithChecksum/94ef698a505d8085753d97885d444f16/.tmp/cf/9b1e0c980703471d9ffd156aff4a0114 as hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithChecksum/94ef698a505d8085753d97885d444f16/cf/9b1e0c980703471d9ffd156aff4a0114 2024-12-15T01:36:15,241 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithChecksum/94dccfb6b7c1fa94d4218f6541830037/.tmp/cf/aea8bb6875fd418bb8fafd0336911f8d as hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithChecksum/94dccfb6b7c1fa94d4218f6541830037/cf/aea8bb6875fd418bb8fafd0336911f8d 2024-12-15T01:36:15,245 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithChecksum/94dccfb6b7c1fa94d4218f6541830037/cf/aea8bb6875fd418bb8fafd0336911f8d, entries=4, sequenceid=6, filesize=5.2 K 2024-12-15T01:36:15,245 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithChecksum/94ef698a505d8085753d97885d444f16/cf/9b1e0c980703471d9ffd156aff4a0114, entries=46, sequenceid=6, filesize=8.1 K 2024-12-15T01:36:15,246 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.HRegion(3040): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for 94dccfb6b7c1fa94d4218f6541830037 in 37ms, sequenceid=6, compaction requested=false 2024-12-15T01:36:15,246 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.HRegion(3040): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for 94ef698a505d8085753d97885d444f16 in 37ms, sequenceid=6, compaction requested=false 2024-12-15T01:36:15,246 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.HRegion(2538): Flush status journal for 94dccfb6b7c1fa94d4218f6541830037: 2024-12-15T01:36:15,246 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,,1734226574005.94dccfb6b7c1fa94d4218f6541830037. for snaptb0-testExportWithChecksum completed. 2024-12-15T01:36:15,246 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.HRegion(2538): Flush status journal for 94ef698a505d8085753d97885d444f16: 2024-12-15T01:36:15,246 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,1,1734226574005.94ef698a505d8085753d97885d444f16. for snaptb0-testExportWithChecksum completed. 
2024-12-15T01:36:15,246 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,,1734226574005.94dccfb6b7c1fa94d4218f6541830037.' region-info for snapshot=snaptb0-testExportWithChecksum 2024-12-15T01:36:15,246 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,1,1734226574005.94ef698a505d8085753d97885d444f16.' region-info for snapshot=snaptb0-testExportWithChecksum 2024-12-15T01:36:15,246 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-15T01:36:15,246 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-15T01:36:15,246 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithChecksum/94dccfb6b7c1fa94d4218f6541830037/cf/aea8bb6875fd418bb8fafd0336911f8d] hfiles 2024-12-15T01:36:15,246 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithChecksum/94ef698a505d8085753d97885d444f16/cf/9b1e0c980703471d9ffd156aff4a0114] hfiles 2024-12-15T01:36:15,246 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithChecksum/94dccfb6b7c1fa94d4218f6541830037/cf/aea8bb6875fd418bb8fafd0336911f8d for snapshot=snaptb0-testExportWithChecksum 2024-12-15T01:36:15,246 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithChecksum/94ef698a505d8085753d97885d444f16/cf/9b1e0c980703471d9ffd156aff4a0114 for snapshot=snaptb0-testExportWithChecksum 2024-12-15T01:36:15,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742247_1423 (size=107) 2024-12-15T01:36:15,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742248_1424 (size=107) 2024-12-15T01:36:15,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742247_1423 (size=107) 2024-12-15T01:36:15,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742247_1423 (size=107) 2024-12-15T01:36:15,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742248_1424 (size=107) 2024-12-15T01:36:15,258 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742248_1424 (size=107) 2024-12-15T01:36:15,259 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,,1734226574005.94dccfb6b7c1fa94d4218f6541830037. 2024-12-15T01:36:15,259 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,1,1734226574005.94ef698a505d8085753d97885d444f16. 2024-12-15T01:36:15,259 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=190 2024-12-15T01:36:15,259 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=189 2024-12-15T01:36:15,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.HMaster(4106): Remote procedure done, pid=190 2024-12-15T01:36:15,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.HMaster(4106): Remote procedure done, pid=189 2024-12-15T01:36:15,259 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithChecksum on region 94ef698a505d8085753d97885d444f16 2024-12-15T01:36:15,259 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithChecksum on region 94dccfb6b7c1fa94d4218f6541830037 2024-12-15T01:36:15,259 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=190, ppid=188, state=RUNNABLE; SnapshotRegionProcedure 94ef698a505d8085753d97885d444f16 2024-12-15T01:36:15,259 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=189, ppid=188, state=RUNNABLE; SnapshotRegionProcedure 94dccfb6b7c1fa94d4218f6541830037 2024-12-15T01:36:15,261 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=190, ppid=188, state=SUCCESS; SnapshotRegionProcedure 94ef698a505d8085753d97885d444f16 in 203 msec 2024-12-15T01:36:15,261 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=189, resume processing ppid=188 2024-12-15T01:36:15,261 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-15T01:36:15,261 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=189, ppid=188, state=SUCCESS; SnapshotRegionProcedure 94dccfb6b7c1fa94d4218f6541830037 in 203 msec 2024-12-15T01:36:15,262 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-15T01:36:15,262 INFO [PEWorker-3 {}] 
procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT
2024-12-15T01:36:15,262 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithChecksum
2024-12-15T01:36:15,263 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum
2024-12-15T01:36:15,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742249_1425 (size=621)
2024-12-15T01:36:15,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742249_1425 (size=621)
2024-12-15T01:36:15,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742249_1425 (size=621)
2024-12-15T01:36:15,274 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT
2024-12-15T01:36:15,278 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT
2024-12-15T01:36:15,278 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum to hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/snaptb0-testExportWithChecksum
2024-12-15T01:36:15,283 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION
2024-12-15T01:36:15,283 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 188
2024-12-15T01:36:15,285 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=188, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } in 239 msec
2024-12-15T01:36:15,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=188
2024-12-15T01:36:15,349 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithChecksum, procId: 188 completed
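
The SnapshotProcedure states above (pid=188, SNAPSHOT_PREPARE through SNAPSHOT_POST_OPERATION, type=FLUSH) are all driven by one synchronous client call. A minimal sketch of that call, assuming a reachable test cluster and reusing the table and snapshot names from this log, might look like:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class SnapshotSketch {
  public static void main(String[] args) throws Exception {
    // Picks up hbase-site.xml / ZooKeeper quorum from the classpath.
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // FLUSH-type snapshot, matching "type=FLUSH" in the procedure log above;
      // the call blocks until the master finishes the snapshot procedure.
      admin.snapshot("snaptb0-testExportWithChecksum",
          TableName.valueOf("testtb-testExportWithChecksum"),
          SnapshotType.FLUSH);
    }
  }
}

The HBaseAdmin$TableFuture record above ("procId: 188 completed") is the client side of such a call returning once the master reports the procedure finished.
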
2024-12-15T01:36:15,350 INFO [Time-limited test {}] snapshot.TestExportSnapshot(476): Local export destination path: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/local-export-1734226575349
2024-12-15T01:36:15,350 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/local-export-1734226575349, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/local-export-1734226575349, srcFsUri=hdfs://localhost:33329, srcDir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b
2024-12-15T01:36:15,373 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:33329, inputRoot=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b
2024-12-15T01:36:15,373 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=org.apache.hadoop.fs.LocalFileSystem@24738cd4, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/local-export-1734226575349, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/local-export-1734226575349/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum
2024-12-15T01:36:15,375 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity.
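
The TestExportSnapshot records above set up a copy of the snapshot from the HDFS source (srcFsUri=hdfs://localhost:33329) to a local file:// destination. Outside the test harness the same copy is typically driven through the ExportSnapshot tool; a hedged sketch of an equivalent programmatic invocation, with an illustrative destination path that is not taken from this log, could be:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Copy the snapshot manifest and the referenced HFiles to a local (file://) destination,
    // mirroring what the test does with its local-export-* directory.
    int exitCode = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportWithChecksum",
        "-copy-to", "file:///tmp/local-export"   // illustrative target, not from the log
    });
    System.exit(exitCode);
  }
}
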
2024-12-15T01:36:15,378 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/snaptb0-testExportWithChecksum to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/local-export-1734226575349/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-15T01:36:15,539 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/hadoop-4407620833256487562.jar 2024-12-15T01:36:15,539 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-15T01:36:15,539 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-15T01:36:15,539 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-15T01:36:16,321 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/hadoop-1562335013540768064.jar 2024-12-15T01:36:16,321 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-15T01:36:16,321 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-15T01:36:16,376 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/hadoop-1522835914111307673.jar 2024-12-15T01:36:16,377 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-15T01:36:16,377 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-15T01:36:16,377 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-15T01:36:16,377 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-15T01:36:16,377 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-15T01:36:16,378 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-15T01:36:16,378 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-15T01:36:16,378 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-15T01:36:16,378 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-15T01:36:16,378 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-15T01:36:16,378 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-15T01:36:16,379 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-15T01:36:16,379 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-15T01:36:16,379 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-15T01:36:16,379 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class 
org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-15T01:36:16,379 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-15T01:36:16,379 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-15T01:36:16,379 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-15T01:36:16,380 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-15T01:36:16,380 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-15T01:36:16,380 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-15T01:36:16,380 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-15T01:36:16,380 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-15T01:36:16,381 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-15T01:36:16,381 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-15T01:36:16,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742250_1426 (size=127628) 2024-12-15T01:36:16,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:38745 is added to blk_1073742250_1426 (size=127628) 2024-12-15T01:36:16,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742250_1426 (size=127628) 2024-12-15T01:36:16,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742251_1427 (size=2172137) 2024-12-15T01:36:16,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742251_1427 (size=2172137) 2024-12-15T01:36:16,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742251_1427 (size=2172137) 2024-12-15T01:36:16,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742252_1428 (size=213228) 2024-12-15T01:36:16,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742252_1428 (size=213228) 2024-12-15T01:36:16,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742252_1428 (size=213228) 2024-12-15T01:36:16,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742253_1429 (size=1877034) 2024-12-15T01:36:16,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742253_1429 (size=1877034) 2024-12-15T01:36:16,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742253_1429 (size=1877034) 2024-12-15T01:36:16,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742254_1430 (size=533455) 2024-12-15T01:36:16,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742254_1430 (size=533455) 2024-12-15T01:36:16,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742254_1430 (size=533455) 2024-12-15T01:36:16,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742255_1431 (size=7280644) 2024-12-15T01:36:16,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742255_1431 (size=7280644) 2024-12-15T01:36:16,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742255_1431 (size=7280644) 2024-12-15T01:36:16,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742256_1432 (size=4188619) 2024-12-15T01:36:16,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742256_1432 (size=4188619) 2024-12-15T01:36:16,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742256_1432 (size=4188619) 2024-12-15T01:36:16,928 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742257_1433 (size=20406) 2024-12-15T01:36:16,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742257_1433 (size=20406) 2024-12-15T01:36:16,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742257_1433 (size=20406) 2024-12-15T01:36:16,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742258_1434 (size=75495) 2024-12-15T01:36:16,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742258_1434 (size=75495) 2024-12-15T01:36:16,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742258_1434 (size=75495) 2024-12-15T01:36:16,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742259_1435 (size=45609) 2024-12-15T01:36:16,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742259_1435 (size=45609) 2024-12-15T01:36:16,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742259_1435 (size=45609) 2024-12-15T01:36:16,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742260_1436 (size=110084) 2024-12-15T01:36:16,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742260_1436 (size=110084) 2024-12-15T01:36:16,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742260_1436 (size=110084) 2024-12-15T01:36:16,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742261_1437 (size=1323991) 2024-12-15T01:36:16,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742261_1437 (size=1323991) 2024-12-15T01:36:16,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742261_1437 (size=1323991) 2024-12-15T01:36:16,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742262_1438 (size=23076) 2024-12-15T01:36:16,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742262_1438 (size=23076) 2024-12-15T01:36:16,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742262_1438 (size=23076) 2024-12-15T01:36:16,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742263_1439 (size=126803) 2024-12-15T01:36:16,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742263_1439 (size=126803) 2024-12-15T01:36:16,980 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742263_1439 (size=126803) 2024-12-15T01:36:16,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742264_1440 (size=322274) 2024-12-15T01:36:16,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742264_1440 (size=322274) 2024-12-15T01:36:16,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742264_1440 (size=322274) 2024-12-15T01:36:16,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742265_1441 (size=1832290) 2024-12-15T01:36:16,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742265_1441 (size=1832290) 2024-12-15T01:36:16,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742265_1441 (size=1832290) 2024-12-15T01:36:17,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742266_1442 (size=30081) 2024-12-15T01:36:17,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742266_1442 (size=30081) 2024-12-15T01:36:17,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742266_1442 (size=30081) 2024-12-15T01:36:17,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742267_1443 (size=53616) 2024-12-15T01:36:17,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742267_1443 (size=53616) 2024-12-15T01:36:17,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742267_1443 (size=53616) 2024-12-15T01:36:17,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742268_1444 (size=29229) 2024-12-15T01:36:17,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742268_1444 (size=29229) 2024-12-15T01:36:17,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742268_1444 (size=29229) 2024-12-15T01:36:17,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742269_1445 (size=169089) 2024-12-15T01:36:17,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742269_1445 (size=169089) 2024-12-15T01:36:17,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742269_1445 (size=169089) 2024-12-15T01:36:17,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742270_1446 (size=5175431) 2024-12-15T01:36:17,067 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742270_1446 (size=5175431) 2024-12-15T01:36:17,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742270_1446 (size=5175431) 2024-12-15T01:36:17,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742271_1447 (size=136454) 2024-12-15T01:36:17,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742271_1447 (size=136454) 2024-12-15T01:36:17,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742271_1447 (size=136454) 2024-12-15T01:36:17,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742272_1448 (size=912094) 2024-12-15T01:36:17,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742272_1448 (size=912094) 2024-12-15T01:36:17,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742272_1448 (size=912094) 2024-12-15T01:36:17,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742273_1449 (size=451756) 2024-12-15T01:36:17,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742273_1449 (size=451756) 2024-12-15T01:36:17,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742273_1449 (size=451756) 2024-12-15T01:36:17,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742274_1450 (size=3317408) 2024-12-15T01:36:17,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742274_1450 (size=3317408) 2024-12-15T01:36:17,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742274_1450 (size=3317408) 2024-12-15T01:36:17,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742275_1451 (size=6350909) 2024-12-15T01:36:17,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742275_1451 (size=6350909) 2024-12-15T01:36:17,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742275_1451 (size=6350909) 2024-12-15T01:36:17,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742276_1452 (size=503880) 2024-12-15T01:36:17,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742276_1452 (size=503880) 2024-12-15T01:36:17,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742276_1452 (size=503880) 2024-12-15T01:36:17,154 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742277_1453 (size=4695811) 2024-12-15T01:36:17,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742277_1453 (size=4695811) 2024-12-15T01:36:17,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742277_1453 (size=4695811) 2024-12-15T01:36:17,156 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-15T01:36:17,158 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testExportWithChecksum' hfile list 2024-12-15T01:36:17,161 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=13.3 K 2024-12-15T01:36:17,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742278_1454 (size=338) 2024-12-15T01:36:17,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742278_1454 (size=338) 2024-12-15T01:36:17,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742278_1454 (size=338) 2024-12-15T01:36:17,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742279_1455 (size=15) 2024-12-15T01:36:17,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742279_1455 (size=15) 2024-12-15T01:36:17,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742279_1455 (size=15) 2024-12-15T01:36:17,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742280_1456 (size=304977) 2024-12-15T01:36:17,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742280_1456 (size=304977) 2024-12-15T01:36:17,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742280_1456 (size=304977) 2024-12-15T01:36:18,054 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-15T01:36:18,054 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-15T01:36:18,057 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734226430009_0007_000001 (auth:SIMPLE) from 127.0.0.1:56186 2024-12-15T01:36:18,066 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/MiniMRCluster_1118476571/yarn-659708074/MiniMRCluster_1118476571-localDir-nm-1_0/usercache/jenkins/appcache/application_1734226430009_0007/container_1734226430009_0007_01_000001/launch_container.sh] 2024-12-15T01:36:18,067 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/MiniMRCluster_1118476571/yarn-659708074/MiniMRCluster_1118476571-localDir-nm-1_0/usercache/jenkins/appcache/application_1734226430009_0007/container_1734226430009_0007_01_000001/container_tokens] 2024-12-15T01:36:18,067 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/MiniMRCluster_1118476571/yarn-659708074/MiniMRCluster_1118476571-localDir-nm-1_0/usercache/jenkins/appcache/application_1734226430009_0007/container_1734226430009_0007_01_000001/sysfs] 2024-12-15T01:36:18,575 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734226430009_0008_000001 (auth:SIMPLE) from 127.0.0.1:39864 2024-12-15T01:36:19,236 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-15T01:36:22,293 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum 2024-12-15T01:36:22,293 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum Metrics about Tables on a single HBase RegionServer 2024-12-15T01:36:22,294 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState 2024-12-15T01:36:23,170 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734226430009_0008_000001 (auth:SIMPLE) from 127.0.0.1:36372 2024-12-15T01:36:23,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742281_1457 (size=350651) 2024-12-15T01:36:23,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742281_1457 (size=350651) 2024-12-15T01:36:23,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742281_1457 (size=350651) 
2024-12-15T01:36:25,415 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734226430009_0008_000001 (auth:SIMPLE) from 127.0.0.1:37860
2024-12-15T01:36:27,796 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties
2024-12-15T01:36:28,200 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/MiniMRCluster_1118476571/yarn-659708074/MiniMRCluster_1118476571-localDir-nm-0_3/usercache/jenkins/appcache/application_1734226430009_0008/container_1734226430009_0008_01_000002/launch_container.sh]
2024-12-15T01:36:28,200 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/MiniMRCluster_1118476571/yarn-659708074/MiniMRCluster_1118476571-localDir-nm-0_3/usercache/jenkins/appcache/application_1734226430009_0008/container_1734226430009_0008_01_000002/container_tokens]
2024-12-15T01:36:28,200 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/MiniMRCluster_1118476571/yarn-659708074/MiniMRCluster_1118476571-localDir-nm-0_3/usercache/jenkins/appcache/application_1734226430009_0008/container_1734226430009_0008_01_000002/sysfs]
Error: java.io.IOException: Checksum mismatch between hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithChecksum/94ef698a505d8085753d97885d444f16/cf/9b1e0c980703471d9ffd156aff4a0114 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/local-export-1734226575349/archive/data/default/testtb-testExportWithChecksum/94ef698a505d8085753d97885d444f16/cf/9b1e0c980703471d9ffd156aff4a0114. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.)
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:596)
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:332)
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:254)
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:180)
    at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145)
    at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800)
    at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348)
    at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172)
2024-12-15T01:36:29,271 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734226430009_0008_000001 (auth:SIMPLE) from 127.0.0.1:44844
2024-12-15T01:36:32,617 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/MiniMRCluster_1118476571/yarn-659708074/MiniMRCluster_1118476571-localDir-nm-1_0/usercache/jenkins/appcache/application_1734226430009_0008/container_1734226430009_0008_01_000003/launch_container.sh]
2024-12-15T01:36:32,617 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/MiniMRCluster_1118476571/yarn-659708074/MiniMRCluster_1118476571-localDir-nm-1_0/usercache/jenkins/appcache/application_1734226430009_0008/container_1734226430009_0008_01_000003/container_tokens]
2024-12-15T01:36:32,617 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/MiniMRCluster_1118476571/yarn-659708074/MiniMRCluster_1118476571-localDir-nm-1_0/usercache/jenkins/appcache/application_1734226430009_0008/container_1734226430009_0008_01_000003/sysfs]
Error: java.io.IOException: Checksum mismatch between hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithChecksum/94ef698a505d8085753d97885d444f16/cf/9b1e0c980703471d9ffd156aff4a0114 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/local-export-1734226575349/archive/data/default/testtb-testExportWithChecksum/94ef698a505d8085753d97885d444f16/cf/9b1e0c980703471d9ffd156aff4a0114. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.)
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:596)
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:332)
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:254)
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:180)
    at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145)
    at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800)
    at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348)
    at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172)
2024-12-15T01:36:34,282 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734226430009_0008_000001 (auth:SIMPLE) from 127.0.0.1:49608
2024-12-15T01:36:36,077 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 6e4491bba5f2e14d8ad41ac86f60d6f5, had cached 0 bytes from a total of 8120
2024-12-15T01:36:36,079 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region eded217274ea249d8fe0b60f2e3037a1, had cached 0 bytes from a total of 5490
2024-12-15T01:36:36,910 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/MiniMRCluster_1118476571/yarn-659708074/MiniMRCluster_1118476571-localDir-nm-1_3/usercache/jenkins/appcache/application_1734226430009_0008/container_1734226430009_0008_01_000004/launch_container.sh]
2024-12-15T01:36:36,910 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/MiniMRCluster_1118476571/yarn-659708074/MiniMRCluster_1118476571-localDir-nm-1_3/usercache/jenkins/appcache/application_1734226430009_0008/container_1734226430009_0008_01_000004/container_tokens]
2024-12-15T01:36:36,910 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/MiniMRCluster_1118476571/yarn-659708074/MiniMRCluster_1118476571-localDir-nm-1_3/usercache/jenkins/appcache/application_1734226430009_0008/container_1734226430009_0008_01_000004/sysfs]
Error: java.io.IOException: Checksum mismatch between hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithChecksum/94ef698a505d8085753d97885d444f16/cf/9b1e0c980703471d9ffd156aff4a0114 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/local-export-1734226575349/archive/data/default/testtb-testExportWithChecksum/94ef698a505d8085753d97885d444f16/cf/9b1e0c980703471d9ffd156aff4a0114. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.)
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:596)
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:332)
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:254)
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:180)
    at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145)
    at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800)
    at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348)
    at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172)
2024-12-15T01:36:38,303 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734226430009_0008_000001 (auth:SIMPLE) from 127.0.0.1:49622
2024-12-15T01:36:40,818 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
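The checksum-mismatch messages above name two ways of handling an export between filesystems of different types. The following is a minimal sketch, not taken from this run, of how those options might be passed to the ExportSnapshot tool through ToolRunner (the same entry point the stack traces go through); the snapshot name is copied from the log, while the destination URI is a placeholder.

```java
// Hedged sketch only: illustrates the flags quoted in the error message above.
// The snapshot name comes from the log; the destination URI is a placeholder.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotChecksumExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // File-level checksum comparison that stays valid across different block
    // sizes / filesystem types, as suggested by the error message.
    conf.set("dfs.checksum.combine.mode", "COMPOSITE_CRC");

    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportWithChecksum",
        "-copy-to", "file:///tmp/local-export"   // placeholder destination
        // Alternative named in the message: add "-no-checksum-verify" to skip
        // verification entirely (risks masking corruption during transfer).
    });
    System.exit(rc);
  }
}
```

On the command line the same options appear as -Ddfs.checksum.combine.mode=COMPOSITE_CRC (a generic -D option parsed by ToolRunner) or -no-checksum-verify, exactly as quoted in the message.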
2024-12-15T01:36:40,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742282_1458 (size=21340)
2024-12-15T01:36:40,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742282_1458 (size=21340)
2024-12-15T01:36:40,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742282_1458 (size=21340)
2024-12-15T01:36:41,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742283_1459 (size=460)
2024-12-15T01:36:41,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742283_1459 (size=460)
2024-12-15T01:36:41,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742283_1459 (size=460)
2024-12-15T01:36:41,055 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/MiniMRCluster_1118476571/yarn-659708074/MiniMRCluster_1118476571-localDir-nm-1_1/usercache/jenkins/appcache/application_1734226430009_0008/container_1734226430009_0008_01_000005/launch_container.sh]
2024-12-15T01:36:41,055 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/MiniMRCluster_1118476571/yarn-659708074/MiniMRCluster_1118476571-localDir-nm-1_1/usercache/jenkins/appcache/application_1734226430009_0008/container_1734226430009_0008_01_000005/container_tokens]
2024-12-15T01:36:41,055 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/MiniMRCluster_1118476571/yarn-659708074/MiniMRCluster_1118476571-localDir-nm-1_1/usercache/jenkins/appcache/application_1734226430009_0008/container_1734226430009_0008_01_000005/sysfs]
2024-12-15T01:36:41,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742284_1460 (size=21340)
2024-12-15T01:36:41,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742284_1460 (size=21340)
2024-12-15T01:36:41,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742284_1460 (size=21340)
2024-12-15T01:36:41,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742285_1461 (size=350651)
2024-12-15T01:36:41,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742285_1461 (size=350651)
2024-12-15T01:36:41,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742285_1461 (size=350651)
2024-12-15T01:36:43,475 ERROR [Time-limited test {}] snapshot.ExportSnapshot(1227): Snapshot export failed
org.apache.hadoop.hbase.snapshot.ExportSnapshotException: Task failed task_1734226430009_0008_m_000000 Job failed as tasks failed. failedMaps:1 failedReduces:0 killedMaps:0 killedReduces: 0
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot.runCopyJob(ExportSnapshot.java:935) ~[classes/:?]
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot.doWork(ExportSnapshot.java:1204) ~[classes/:?]
    at org.apache.hadoop.hbase.util.AbstractHBaseTool.run(AbstractHBaseTool.java:151) ~[classes/:?]
    at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:82) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.runExportSnapshot(TestExportSnapshot.java:523) ~[test-classes/:?]
    at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportFileSystemState(TestExportSnapshot.java:353) ~[test-classes/:?]
    at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportWithChecksum(TestExportSnapshot.java:237) ~[test-classes/:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-15T01:36:43,476 INFO [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226603476
2024-12-15T01:36:43,477 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:33329, tgtDir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226603476, rawTgtDir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226603476, srcFsUri=hdfs://localhost:33329, srcDir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b
2024-12-15T01:36:43,516 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:33329, inputRoot=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b
2024-12-15T01:36:43,516 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-778768832_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226603476, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226603476/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum
2024-12-15T01:36:43,518 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity.
2024-12-15T01:36:43,523 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/snaptb0-testExportWithChecksum to hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226603476/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-15T01:36:43,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742286_1462 (size=156) 2024-12-15T01:36:43,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742286_1462 (size=156) 2024-12-15T01:36:43,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742286_1462 (size=156) 2024-12-15T01:36:43,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742287_1463 (size=621) 2024-12-15T01:36:43,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742287_1463 (size=621) 2024-12-15T01:36:43,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742287_1463 (size=621) 2024-12-15T01:36:43,718 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/hadoop-5861284116939183990.jar 2024-12-15T01:36:43,718 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-15T01:36:43,718 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-15T01:36:43,719 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-15T01:36:44,500 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/hadoop-532190739615759309.jar 2024-12-15T01:36:44,500 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-15T01:36:44,500 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-15T01:36:44,554 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/hadoop-13779412471465490582.jar 2024-12-15T01:36:44,554 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-15T01:36:44,554 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-15T01:36:44,554 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-15T01:36:44,555 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-15T01:36:44,555 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-15T01:36:44,555 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-15T01:36:44,555 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-15T01:36:44,555 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-15T01:36:44,555 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-15T01:36:44,556 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-15T01:36:44,556 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-15T01:36:44,556 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-15T01:36:44,556 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-15T01:36:44,556 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-15T01:36:44,556 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-15T01:36:44,556 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-15T01:36:44,557 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-15T01:36:44,557 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-15T01:36:44,557 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-15T01:36:44,557 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-15T01:36:44,557 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-15T01:36:44,557 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-15T01:36:44,558 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-15T01:36:44,558 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-15T01:36:44,558 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-15T01:36:44,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742288_1464 (size=127628) 2024-12-15T01:36:44,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742288_1464 (size=127628) 2024-12-15T01:36:44,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742288_1464 (size=127628) 2024-12-15T01:36:44,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742289_1465 (size=2172137) 2024-12-15T01:36:44,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742289_1465 (size=2172137) 2024-12-15T01:36:44,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742289_1465 (size=2172137) 2024-12-15T01:36:44,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742290_1466 (size=213228) 2024-12-15T01:36:44,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742290_1466 (size=213228) 2024-12-15T01:36:44,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742290_1466 (size=213228) 2024-12-15T01:36:44,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742291_1467 (size=1877034) 2024-12-15T01:36:44,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742291_1467 (size=1877034) 2024-12-15T01:36:44,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742291_1467 (size=1877034) 2024-12-15T01:36:44,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742292_1468 (size=533455) 2024-12-15T01:36:44,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742292_1468 (size=533455) 2024-12-15T01:36:44,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742292_1468 (size=533455) 2024-12-15T01:36:44,652 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742293_1469 (size=7280644) 2024-12-15T01:36:44,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742293_1469 (size=7280644) 2024-12-15T01:36:44,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742293_1469 (size=7280644) 2024-12-15T01:36:44,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742294_1470 (size=4188619) 2024-12-15T01:36:44,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742294_1470 (size=4188619) 2024-12-15T01:36:44,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742294_1470 (size=4188619) 2024-12-15T01:36:44,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742295_1471 (size=20406) 2024-12-15T01:36:44,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742295_1471 (size=20406) 2024-12-15T01:36:44,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742295_1471 (size=20406) 2024-12-15T01:36:44,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742296_1472 (size=75495) 2024-12-15T01:36:44,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742296_1472 (size=75495) 2024-12-15T01:36:44,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742296_1472 (size=75495) 2024-12-15T01:36:44,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742297_1473 (size=45609) 2024-12-15T01:36:44,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742297_1473 (size=45609) 2024-12-15T01:36:44,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742297_1473 (size=45609) 2024-12-15T01:36:44,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742298_1474 (size=110084) 2024-12-15T01:36:44,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742298_1474 (size=110084) 2024-12-15T01:36:44,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742298_1474 (size=110084) 2024-12-15T01:36:44,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742299_1475 (size=1323991) 2024-12-15T01:36:44,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742299_1475 (size=1323991) 2024-12-15T01:36:44,703 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742299_1475 (size=1323991) 2024-12-15T01:36:44,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742300_1476 (size=23076) 2024-12-15T01:36:44,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742300_1476 (size=23076) 2024-12-15T01:36:44,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742300_1476 (size=23076) 2024-12-15T01:36:44,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742301_1477 (size=126803) 2024-12-15T01:36:44,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742301_1477 (size=126803) 2024-12-15T01:36:44,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742301_1477 (size=126803) 2024-12-15T01:36:44,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742302_1478 (size=322274) 2024-12-15T01:36:44,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742302_1478 (size=322274) 2024-12-15T01:36:44,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742302_1478 (size=322274) 2024-12-15T01:36:44,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742303_1479 (size=1832290) 2024-12-15T01:36:44,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742303_1479 (size=1832290) 2024-12-15T01:36:44,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742303_1479 (size=1832290) 2024-12-15T01:36:44,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742304_1480 (size=30081) 2024-12-15T01:36:44,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742304_1480 (size=30081) 2024-12-15T01:36:44,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742304_1480 (size=30081) 2024-12-15T01:36:44,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742305_1481 (size=53616) 2024-12-15T01:36:44,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742305_1481 (size=53616) 2024-12-15T01:36:44,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742305_1481 (size=53616) 2024-12-15T01:36:44,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742306_1482 (size=29229) 2024-12-15T01:36:44,767 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742306_1482 (size=29229) 2024-12-15T01:36:44,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742306_1482 (size=29229) 2024-12-15T01:36:44,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742307_1483 (size=169089) 2024-12-15T01:36:44,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742307_1483 (size=169089) 2024-12-15T01:36:44,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742307_1483 (size=169089) 2024-12-15T01:36:44,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742308_1484 (size=6350909) 2024-12-15T01:36:44,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742308_1484 (size=6350909) 2024-12-15T01:36:44,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742308_1484 (size=6350909) 2024-12-15T01:36:44,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742309_1485 (size=912094) 2024-12-15T01:36:44,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742309_1485 (size=912094) 2024-12-15T01:36:44,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742309_1485 (size=912094) 2024-12-15T01:36:44,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742310_1486 (size=5175431) 2024-12-15T01:36:44,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742310_1486 (size=5175431) 2024-12-15T01:36:44,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742310_1486 (size=5175431) 2024-12-15T01:36:44,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742311_1487 (size=136454) 2024-12-15T01:36:44,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742311_1487 (size=136454) 2024-12-15T01:36:44,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742311_1487 (size=136454) 2024-12-15T01:36:44,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742312_1488 (size=3317408) 2024-12-15T01:36:44,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742312_1488 (size=3317408) 2024-12-15T01:36:44,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742312_1488 (size=3317408) 2024-12-15T01:36:44,840 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742313_1489 (size=451756) 2024-12-15T01:36:44,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742313_1489 (size=451756) 2024-12-15T01:36:44,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742313_1489 (size=451756) 2024-12-15T01:36:44,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742314_1490 (size=503880) 2024-12-15T01:36:44,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742314_1490 (size=503880) 2024-12-15T01:36:44,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742314_1490 (size=503880) 2024-12-15T01:36:44,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742315_1491 (size=4695811) 2024-12-15T01:36:44,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742315_1491 (size=4695811) 2024-12-15T01:36:44,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742315_1491 (size=4695811) 2024-12-15T01:36:44,861 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-15T01:36:44,863 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testExportWithChecksum' hfile list 2024-12-15T01:36:44,864 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=13.3 K 2024-12-15T01:36:44,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742316_1492 (size=338) 2024-12-15T01:36:44,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742316_1492 (size=338) 2024-12-15T01:36:44,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742316_1492 (size=338) 2024-12-15T01:36:44,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742317_1493 (size=15) 2024-12-15T01:36:44,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742317_1493 (size=15) 2024-12-15T01:36:44,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742317_1493 (size=15) 2024-12-15T01:36:44,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742318_1494 (size=304927) 2024-12-15T01:36:44,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742318_1494 (size=304927) 2024-12-15T01:36:44,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to 
blk_1073742318_1494 (size=304927) 2024-12-15T01:36:45,492 DEBUG [master/66d575e28c61:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 94ef698a505d8085753d97885d444f16 changed from -1.0 to 0.0, refreshing cache 2024-12-15T01:36:45,493 DEBUG [master/66d575e28c61:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 6e4491bba5f2e14d8ad41ac86f60d6f5 changed from -1.0 to 0.0, refreshing cache 2024-12-15T01:36:45,493 DEBUG [master/66d575e28c61:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 94dccfb6b7c1fa94d4218f6541830037 changed from -1.0 to 0.0, refreshing cache 2024-12-15T01:36:45,493 DEBUG [master/66d575e28c61:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region eded217274ea249d8fe0b60f2e3037a1 changed from -1.0 to 0.0, refreshing cache 2024-12-15T01:36:47,518 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-15T01:36:47,519 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-15T01:36:47,522 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734226430009_0008_000001 (auth:SIMPLE) from 127.0.0.1:33024 2024-12-15T01:36:47,531 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/MiniMRCluster_1118476571/yarn-659708074/MiniMRCluster_1118476571-localDir-nm-0_1/usercache/jenkins/appcache/application_1734226430009_0008/container_1734226430009_0008_01_000001/launch_container.sh] 2024-12-15T01:36:47,531 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/MiniMRCluster_1118476571/yarn-659708074/MiniMRCluster_1118476571-localDir-nm-0_1/usercache/jenkins/appcache/application_1734226430009_0008/container_1734226430009_0008_01_000001/container_tokens] 2024-12-15T01:36:47,532 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/MiniMRCluster_1118476571/yarn-659708074/MiniMRCluster_1118476571-localDir-nm-0_1/usercache/jenkins/appcache/application_1734226430009_0008/container_1734226430009_0008_01_000001/sysfs] 2024-12-15T01:36:48,053 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734226430009_0009_000001 (auth:SIMPLE) from 127.0.0.1:45022 2024-12-15T01:36:54,070 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734226430009_0009_000001 (auth:SIMPLE) from 127.0.0.1:57670 2024-12-15T01:36:54,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742319_1495 (size=350601) 2024-12-15T01:36:54,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742319_1495 (size=350601) 2024-12-15T01:36:54,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742319_1495 (size=350601) 2024-12-15T01:36:56,279 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734226430009_0009_000001 (auth:SIMPLE) from 127.0.0.1:34196 2024-12-15T01:36:59,366 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 94dccfb6b7c1fa94d4218f6541830037, had cached 0 bytes from a total of 5354 2024-12-15T01:36:59,366 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 94ef698a505d8085753d97885d444f16, had cached 0 bytes from a total of 8256 2024-12-15T01:36:59,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742320_1496 (size=8256) 2024-12-15T01:36:59,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742320_1496 (size=8256) 2024-12-15T01:36:59,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742320_1496 (size=8256) 2024-12-15T01:36:59,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742321_1497 (size=5354) 2024-12-15T01:36:59,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742321_1497 (size=5354) 2024-12-15T01:36:59,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742321_1497 (size=5354) 2024-12-15T01:36:59,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742322_1498 (size=17413) 2024-12-15T01:36:59,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742322_1498 (size=17413) 2024-12-15T01:36:59,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742322_1498 (size=17413) 2024-12-15T01:36:59,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742323_1499 (size=462) 2024-12-15T01:36:59,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742323_1499 (size=462) 2024-12-15T01:36:59,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742323_1499 (size=462) 2024-12-15T01:36:59,788 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/MiniMRCluster_1118476571/yarn-659708074/MiniMRCluster_1118476571-localDir-nm-0_3/usercache/jenkins/appcache/application_1734226430009_0009/container_1734226430009_0009_01_000002/launch_container.sh] 2024-12-15T01:36:59,788 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/MiniMRCluster_1118476571/yarn-659708074/MiniMRCluster_1118476571-localDir-nm-0_3/usercache/jenkins/appcache/application_1734226430009_0009/container_1734226430009_0009_01_000002/container_tokens] 2024-12-15T01:36:59,788 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/MiniMRCluster_1118476571/yarn-659708074/MiniMRCluster_1118476571-localDir-nm-0_3/usercache/jenkins/appcache/application_1734226430009_0009/container_1734226430009_0009_01_000002/sysfs] 2024-12-15T01:36:59,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742324_1500 (size=17413) 2024-12-15T01:36:59,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742324_1500 (size=17413) 2024-12-15T01:36:59,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742324_1500 (size=17413) 2024-12-15T01:36:59,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742325_1501 (size=350601) 2024-12-15T01:36:59,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742325_1501 (size=350601) 2024-12-15T01:36:59,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742325_1501 (size=350601) 2024-12-15T01:36:59,842 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734226430009_0009_000001 (auth:SIMPLE) from 127.0.0.1:34212 2024-12-15T01:37:01,035 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-15T01:37:01,037 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 
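Immediately below, the test lists the files of the exported snapshot (.snapshotinfo and data.manifest). A minimal sketch of the same kind of listing using only the plain Hadoop FileSystem API; the namenode URI and export path are copied from these log entries, everything else is illustrative.

```java
// Hedged sketch only: a plain FileSystem walk equivalent to the "List files"
// step the test logs just below. URI and path are copied from the log entries.
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

public class ListExportedSnapshotFiles {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:33329"), conf);
    Path exported = new Path("/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/"
        + "export-test/export-1734226603476/.hbase-snapshot/snaptb0-testExportWithChecksum");
    // Expect at least .snapshotinfo and data.manifest, as the log shows.
    RemoteIterator<LocatedFileStatus> it = fs.listFiles(exported, true);
    while (it.hasNext()) {
      LocatedFileStatus f = it.next();
      System.out.println(f.getPath() + " (" + f.getLen() + " bytes)");
    }
  }
}
```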
2024-12-15T01:37:01,052 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: snaptb0-testExportWithChecksum 2024-12-15T01:37:01,052 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-15T01:37:01,053 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-15T01:37:01,053 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-778768832_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/snaptb0-testExportWithChecksum at hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-12-15T01:37:01,053 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/snaptb0-testExportWithChecksum/.snapshotinfo 2024-12-15T01:37:01,053 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/snaptb0-testExportWithChecksum/data.manifest 2024-12-15T01:37:01,053 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-778768832_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226603476/.hbase-snapshot/snaptb0-testExportWithChecksum at hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226603476/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-12-15T01:37:01,054 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226603476/.hbase-snapshot/snaptb0-testExportWithChecksum/.snapshotinfo 2024-12-15T01:37:01,054 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226603476/.hbase-snapshot/snaptb0-testExportWithChecksum/data.manifest 2024-12-15T01:37:01,061 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportWithChecksum 2024-12-15T01:37:01,061 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportWithChecksum 2024-12-15T01:37:01,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] procedure2.ProcedureExecutor(1098): Stored pid=191, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportWithChecksum 2024-12-15T01:37:01,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=191 2024-12-15T01:37:01,064 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734226621064"}]},"ts":"1734226621064"} 2024-12-15T01:37:01,066 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithChecksum, state=DISABLING in hbase:meta 2024-12-15T01:37:01,111 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set 
testtb-testExportWithChecksum to state=DISABLING 2024-12-15T01:37:01,112 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=192, ppid=191, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportWithChecksum}] 2024-12-15T01:37:01,113 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=193, ppid=192, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=94dccfb6b7c1fa94d4218f6541830037, UNASSIGN}, {pid=194, ppid=192, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=94ef698a505d8085753d97885d444f16, UNASSIGN}] 2024-12-15T01:37:01,113 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=194, ppid=192, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=94ef698a505d8085753d97885d444f16, UNASSIGN 2024-12-15T01:37:01,113 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=193, ppid=192, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=94dccfb6b7c1fa94d4218f6541830037, UNASSIGN 2024-12-15T01:37:01,114 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=193 updating hbase:meta row=94dccfb6b7c1fa94d4218f6541830037, regionState=CLOSING, regionLocation=66d575e28c61,39381,1734226422872 2024-12-15T01:37:01,114 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=194 updating hbase:meta row=94ef698a505d8085753d97885d444f16, regionState=CLOSING, regionLocation=66d575e28c61,35281,1734226422799 2024-12-15T01:37:01,115 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-15T01:37:01,115 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=195, ppid=193, state=RUNNABLE; CloseRegionProcedure 94dccfb6b7c1fa94d4218f6541830037, server=66d575e28c61,39381,1734226422872}] 2024-12-15T01:37:01,115 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-15T01:37:01,116 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=196, ppid=194, state=RUNNABLE; CloseRegionProcedure 94ef698a505d8085753d97885d444f16, server=66d575e28c61,35281,1734226422799}] 2024-12-15T01:37:01,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=191 2024-12-15T01:37:01,267 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 66d575e28c61,35281,1734226422799 2024-12-15T01:37:01,267 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 66d575e28c61,39381,1734226422872 2024-12-15T01:37:01,267 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] handler.UnassignRegionHandler(124): Close 94dccfb6b7c1fa94d4218f6541830037 2024-12-15T01:37:01,267 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] handler.UnassignRegionHandler(124): Close 94ef698a505d8085753d97885d444f16 2024-12-15T01:37:01,267 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] 
handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-15T01:37:01,267 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-15T01:37:01,267 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1681): Closing 94dccfb6b7c1fa94d4218f6541830037, disabling compactions & flushes 2024-12-15T01:37:01,267 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1703): Closing region testtb-testExportWithChecksum,,1734226574005.94dccfb6b7c1fa94d4218f6541830037. 2024-12-15T01:37:01,268 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithChecksum,,1734226574005.94dccfb6b7c1fa94d4218f6541830037. 2024-12-15T01:37:01,268 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1681): Closing 94ef698a505d8085753d97885d444f16, disabling compactions & flushes 2024-12-15T01:37:01,268 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithChecksum,,1734226574005.94dccfb6b7c1fa94d4218f6541830037. after waiting 0 ms 2024-12-15T01:37:01,268 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithChecksum,,1734226574005.94dccfb6b7c1fa94d4218f6541830037. 2024-12-15T01:37:01,268 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1703): Closing region testtb-testExportWithChecksum,1,1734226574005.94ef698a505d8085753d97885d444f16. 2024-12-15T01:37:01,268 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithChecksum,1,1734226574005.94ef698a505d8085753d97885d444f16. 2024-12-15T01:37:01,268 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithChecksum,1,1734226574005.94ef698a505d8085753d97885d444f16. after waiting 0 ms 2024-12-15T01:37:01,268 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithChecksum,1,1734226574005.94ef698a505d8085753d97885d444f16. 
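The snapshot being torn down here was produced by the ExportSnapshot tool ("Export Completed: snaptb0-testExportWithChecksum" at the top of this section, followed by the verification of the destination file listing). As a rough sketch of how such an export can be driven programmatically, assuming ExportSnapshot is run as a standard Hadoop Tool the way the hbase command line invokes it (the class name ExportSnapshotSketch and the ToolRunner wiring are illustrative and not taken from the test, which presumably goes through its own utilities); the snapshot name and destination URI are copied from the log entries above:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Snapshot name and export destination taken from the log above; the
    // export-1734226603476 directory was created by the test harness.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "--snapshot", "snaptb0-testExportWithChecksum",
        "--copy-to", "hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226603476"
    });
    System.exit(rc);
  }
}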
2024-12-15T01:37:01,273 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithChecksum/94dccfb6b7c1fa94d4218f6541830037/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-15T01:37:01,273 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithChecksum/94ef698a505d8085753d97885d444f16/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-15T01:37:01,273 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-15T01:37:01,273 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-15T01:37:01,273 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1922): Closed testtb-testExportWithChecksum,,1734226574005.94dccfb6b7c1fa94d4218f6541830037. 2024-12-15T01:37:01,273 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1635): Region close journal for 94dccfb6b7c1fa94d4218f6541830037: 2024-12-15T01:37:01,273 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1922): Closed testtb-testExportWithChecksum,1,1734226574005.94ef698a505d8085753d97885d444f16. 
2024-12-15T01:37:01,273 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1635): Region close journal for 94ef698a505d8085753d97885d444f16: 2024-12-15T01:37:01,275 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] handler.UnassignRegionHandler(170): Closed 94dccfb6b7c1fa94d4218f6541830037 2024-12-15T01:37:01,275 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=193 updating hbase:meta row=94dccfb6b7c1fa94d4218f6541830037, regionState=CLOSED 2024-12-15T01:37:01,275 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] handler.UnassignRegionHandler(170): Closed 94ef698a505d8085753d97885d444f16 2024-12-15T01:37:01,275 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=194 updating hbase:meta row=94ef698a505d8085753d97885d444f16, regionState=CLOSED 2024-12-15T01:37:01,278 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=195, resume processing ppid=193 2024-12-15T01:37:01,278 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=196, resume processing ppid=194 2024-12-15T01:37:01,278 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=196, ppid=194, state=SUCCESS; CloseRegionProcedure 94ef698a505d8085753d97885d444f16, server=66d575e28c61,35281,1734226422799 in 161 msec 2024-12-15T01:37:01,279 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=195, ppid=193, state=SUCCESS; CloseRegionProcedure 94dccfb6b7c1fa94d4218f6541830037, server=66d575e28c61,39381,1734226422872 in 161 msec 2024-12-15T01:37:01,279 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=193, ppid=192, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=94dccfb6b7c1fa94d4218f6541830037, UNASSIGN in 165 msec 2024-12-15T01:37:01,280 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=194, resume processing ppid=192 2024-12-15T01:37:01,280 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=194, ppid=192, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=94ef698a505d8085753d97885d444f16, UNASSIGN in 165 msec 2024-12-15T01:37:01,281 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=192, resume processing ppid=191 2024-12-15T01:37:01,281 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=192, ppid=191, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportWithChecksum in 168 msec 2024-12-15T01:37:01,282 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734226621282"}]},"ts":"1734226621282"} 2024-12-15T01:37:01,283 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithChecksum, state=DISABLED in hbase:meta 2024-12-15T01:37:01,294 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithChecksum to state=DISABLED 2024-12-15T01:37:01,296 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=191, state=SUCCESS; DisableTableProcedure table=testtb-testExportWithChecksum in 234 msec 2024-12-15T01:37:01,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=191 2024-12-15T01:37:01,366 
INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportWithChecksum, procId: 191 completed 2024-12-15T01:37:01,367 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportWithChecksum 2024-12-15T01:37:01,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] procedure2.ProcedureExecutor(1098): Stored pid=197, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-15T01:37:01,369 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=197, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-15T01:37:01,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportWithChecksum 2024-12-15T01:37:01,370 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=197, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-15T01:37:01,372 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35281 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportWithChecksum 2024-12-15T01:37:01,374 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithChecksum/94dccfb6b7c1fa94d4218f6541830037 2024-12-15T01:37:01,374 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithChecksum/94ef698a505d8085753d97885d444f16 2024-12-15T01:37:01,378 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithChecksum/94dccfb6b7c1fa94d4218f6541830037/cf, FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithChecksum/94dccfb6b7c1fa94d4218f6541830037/recovered.edits] 2024-12-15T01:37:01,378 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithChecksum/94ef698a505d8085753d97885d444f16/cf, FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithChecksum/94ef698a505d8085753d97885d444f16/recovered.edits] 2024-12-15T01:37:01,383 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithChecksum/94dccfb6b7c1fa94d4218f6541830037/cf/aea8bb6875fd418bb8fafd0336911f8d to hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/archive/data/default/testtb-testExportWithChecksum/94dccfb6b7c1fa94d4218f6541830037/cf/aea8bb6875fd418bb8fafd0336911f8d 2024-12-15T01:37:01,386 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35281-0x1002752410c0002, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper 
Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-15T01:37:01,386 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39381-0x1002752410c0003, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-15T01:37:01,386 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42663-0x1002752410c0001, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-15T01:37:01,386 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-15T01:37:01,386 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-12-15T01:37:01,386 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-12-15T01:37:01,387 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-12-15T01:37:01,387 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-12-15T01:37:01,394 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42663-0x1002752410c0001, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-15T01:37:01,394 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42663-0x1002752410c0001, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:37:01,394 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-15T01:37:01,394 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:37:01,394 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39381-0x1002752410c0003, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-15T01:37:01,394 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39381-0x1002752410c0003, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:37:01,395 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35281-0x1002752410c0002, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-15T01:37:01,395 
DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35281-0x1002752410c0002, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:37:01,396 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-15T01:37:01,396 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-15T01:37:01,396 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-15T01:37:01,396 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-15T01:37:01,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=197 2024-12-15T01:37:01,402 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithChecksum/94ef698a505d8085753d97885d444f16/cf/9b1e0c980703471d9ffd156aff4a0114 to hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/archive/data/default/testtb-testExportWithChecksum/94ef698a505d8085753d97885d444f16/cf/9b1e0c980703471d9ffd156aff4a0114 2024-12-15T01:37:01,402 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithChecksum/94dccfb6b7c1fa94d4218f6541830037/recovered.edits/9.seqid to hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/archive/data/default/testtb-testExportWithChecksum/94dccfb6b7c1fa94d4218f6541830037/recovered.edits/9.seqid 2024-12-15T01:37:01,403 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithChecksum/94dccfb6b7c1fa94d4218f6541830037 2024-12-15T01:37:01,405 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithChecksum/94ef698a505d8085753d97885d444f16/recovered.edits/9.seqid to hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/archive/data/default/testtb-testExportWithChecksum/94ef698a505d8085753d97885d444f16/recovered.edits/9.seqid 2024-12-15T01:37:01,405 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportWithChecksum/94ef698a505d8085753d97885d444f16 2024-12-15T01:37:01,406 DEBUG [PEWorker-2 {}] 
procedure.DeleteTableProcedure(313): Archived testtb-testExportWithChecksum regions 2024-12-15T01:37:01,409 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=197, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-15T01:37:01,412 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportWithChecksum from hbase:meta 2024-12-15T01:37:01,415 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportWithChecksum' descriptor. 2024-12-15T01:37:01,417 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=197, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-15T01:37:01,417 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportWithChecksum' from region states. 2024-12-15T01:37:01,417 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum,,1734226574005.94dccfb6b7c1fa94d4218f6541830037.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734226621417"}]},"ts":"9223372036854775807"} 2024-12-15T01:37:01,418 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum,1,1734226574005.94ef698a505d8085753d97885d444f16.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734226621417"}]},"ts":"9223372036854775807"} 2024-12-15T01:37:01,420 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-15T01:37:01,420 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 94dccfb6b7c1fa94d4218f6541830037, NAME => 'testtb-testExportWithChecksum,,1734226574005.94dccfb6b7c1fa94d4218f6541830037.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 94ef698a505d8085753d97885d444f16, NAME => 'testtb-testExportWithChecksum,1,1734226574005.94ef698a505d8085753d97885d444f16.', STARTKEY => '1', ENDKEY => ''}] 2024-12-15T01:37:01,420 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportWithChecksum' as deleted. 
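The disable/delete/snapshot-removal sequence recorded in the entries above and just below (DisableTableProcedure pid=191, DeleteTableProcedure pid=197, then "Deleting snapshot" in SnapshotManager) maps onto a small set of client-side Admin calls. A minimal sketch, assuming a plain client connection to this cluster; the wrapper class name is illustrative and the test itself presumably drives these steps through its own testing utilities:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TestTableCleanupSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportWithChecksum");
      admin.disableTable(table);  // DisableTableProcedure (pid=191, finished above)
      admin.deleteTable(table);   // DeleteTableProcedure (pid=197, completes just below)
      // The SnapshotManager "Deleting snapshot" entries below correspond to:
      admin.deleteSnapshot("emptySnaptb0-testExportWithChecksum");
      admin.deleteSnapshot("snaptb0-testExportWithChecksum");
    }
  }
}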
2024-12-15T01:37:01,420 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734226621420"}]},"ts":"9223372036854775807"} 2024-12-15T01:37:01,422 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportWithChecksum state from META 2024-12-15T01:37:01,428 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=197, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-15T01:37:01,429 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=197, state=SUCCESS; DeleteTableProcedure table=testtb-testExportWithChecksum in 61 msec 2024-12-15T01:37:01,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=197 2024-12-15T01:37:01,497 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportWithChecksum, procId: 197 completed 2024-12-15T01:37:01,502 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithChecksum" 2024-12-15T01:37:01,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testExportWithChecksum 2024-12-15T01:37:01,504 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithChecksum" 2024-12-15T01:37:01,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportWithChecksum 2024-12-15T01:37:01,524 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportWithChecksum Thread=817 (was 811) Potentially hanging thread: MoveIntermediateToDone Thread #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #13 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MoveIntermediateToDone Thread #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-20 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x613351a8-shared-pool-46 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-17 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x613351a8-shared-pool-47 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #16 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (309020234) connection to localhost/127.0.0.1:45789 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_121344094_1 at /127.0.0.1:41020 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_121344094_1 at /127.0.0.1:37056 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #15 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-19 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x613351a8-shared-pool-48 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-778768832_22 at /127.0.0.1:53500 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45789 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-22 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 76264) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-21 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-6858 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-778768832_22 at /127.0.0.1:55898 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) 
app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-778768832_22 at /127.0.0.1:56434 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MoveIntermediateToDone Thread #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42491 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-18 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x613351a8-shared-pool-45 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #14 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? 
-, OpenFileDescriptor=819 (was 821), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=370 (was 417), ProcessCount=17 (was 17), AvailableMemoryMB=2870 (was 3262) 2024-12-15T01:37:01,524 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=817 is superior to 500 2024-12-15T01:37:01,541 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithSkipTmp Thread=817, OpenFileDescriptor=819, MaxFileDescriptor=1048576, SystemLoadAverage=370, ProcessCount=17, AvailableMemoryMB=2870 2024-12-15T01:37:01,541 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=817 is superior to 500 2024-12-15T01:37:01,543 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-15T01:37:01,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] procedure2.ProcedureExecutor(1098): Stored pid=198, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-15T01:37:01,544 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=198, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_PRE_OPERATION 2024-12-15T01:37:01,544 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T01:37:01,544 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithSkipTmp" procId is: 198 2024-12-15T01:37:01,545 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=198, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-15T01:37:01,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=198 2024-12-15T01:37:01,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742326_1502 (size=418) 2024-12-15T01:37:01,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742326_1502 (size=418) 2024-12-15T01:37:01,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742326_1502 (size=418) 2024-12-15T01:37:01,553 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 0df583d2bd60c0fb93df75ab79af75ab, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1734226621542.0df583d2bd60c0fb93df75ab79af75ab.', STARTKEY => '', ENDKEY => '1'}, 
tableDescriptor='testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b 2024-12-15T01:37:01,553 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1d4794c035c3c87db54e27f1a3f30bba, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1734226621542.1d4794c035c3c87db54e27f1a3f30bba.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b 2024-12-15T01:37:01,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742327_1503 (size=79) 2024-12-15T01:37:01,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742327_1503 (size=79) 2024-12-15T01:37:01,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742327_1503 (size=79) 2024-12-15T01:37:01,561 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithSkipTmp,1,1734226621542.1d4794c035c3c87db54e27f1a3f30bba.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T01:37:01,562 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1681): Closing 1d4794c035c3c87db54e27f1a3f30bba, disabling compactions & flushes 2024-12-15T01:37:01,562 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithSkipTmp,1,1734226621542.1d4794c035c3c87db54e27f1a3f30bba. 2024-12-15T01:37:01,562 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1734226621542.1d4794c035c3c87db54e27f1a3f30bba. 2024-12-15T01:37:01,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742328_1504 (size=79) 2024-12-15T01:37:01,562 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1734226621542.1d4794c035c3c87db54e27f1a3f30bba. 
after waiting 0 ms 2024-12-15T01:37:01,562 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,1,1734226621542.1d4794c035c3c87db54e27f1a3f30bba. 2024-12-15T01:37:01,562 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithSkipTmp,1,1734226621542.1d4794c035c3c87db54e27f1a3f30bba. 2024-12-15T01:37:01,562 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1635): Region close journal for 1d4794c035c3c87db54e27f1a3f30bba: 2024-12-15T01:37:01,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742328_1504 (size=79) 2024-12-15T01:37:01,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742328_1504 (size=79) 2024-12-15T01:37:01,562 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithSkipTmp,,1734226621542.0df583d2bd60c0fb93df75ab79af75ab.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T01:37:01,562 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1681): Closing 0df583d2bd60c0fb93df75ab79af75ab, disabling compactions & flushes 2024-12-15T01:37:01,563 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithSkipTmp,,1734226621542.0df583d2bd60c0fb93df75ab79af75ab. 2024-12-15T01:37:01,563 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithSkipTmp,,1734226621542.0df583d2bd60c0fb93df75ab79af75ab. 2024-12-15T01:37:01,563 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,,1734226621542.0df583d2bd60c0fb93df75ab79af75ab. after waiting 0 ms 2024-12-15T01:37:01,563 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,,1734226621542.0df583d2bd60c0fb93df75ab79af75ab. 2024-12-15T01:37:01,563 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithSkipTmp,,1734226621542.0df583d2bd60c0fb93df75ab79af75ab. 
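The CreateTableProcedure entries around here (pid=198) correspond to creating a table with a single 'cf' family (VERSIONS => '1') pre-split at row key '1', which is what yields the two regions logged above. A minimal client-side sketch using the public Admin and TableDescriptorBuilder API; the class name and connection setup are illustrative assumptions, the test presumably uses its own helpers, and the full descriptor in the log carries more attributes than shown here:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreatePreSplitTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptor desc = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)  // VERSIONS => '1' in the descriptor logged above
              .build())
          .build();
      // A single split key '1' produces the two regions seen in the log:
      // ('', '1') -> 0df583d2bd60c0fb93df75ab79af75ab and ('1', '') -> 1d4794c035c3c87db54e27f1a3f30bba.
      admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
    }
  }
}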
2024-12-15T01:37:01,563 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1635): Region close journal for 0df583d2bd60c0fb93df75ab79af75ab: 2024-12-15T01:37:01,563 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=198, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_ADD_TO_META 2024-12-15T01:37:01,563 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSkipTmp,1,1734226621542.1d4794c035c3c87db54e27f1a3f30bba.","families":{"info":[{"qualifier":"regioninfo","vlen":78,"tag":[],"timestamp":"1734226621563"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734226621563"}]},"ts":"1734226621563"} 2024-12-15T01:37:01,564 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSkipTmp,,1734226621542.0df583d2bd60c0fb93df75ab79af75ab.","families":{"info":[{"qualifier":"regioninfo","vlen":78,"tag":[],"timestamp":"1734226621563"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734226621563"}]},"ts":"1734226621563"} 2024-12-15T01:37:01,565 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-15T01:37:01,566 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=198, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-15T01:37:01,566 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734226621566"}]},"ts":"1734226621566"} 2024-12-15T01:37:01,567 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=ENABLING in hbase:meta 2024-12-15T01:37:01,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=198 2024-12-15T01:37:01,844 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(202): Hosts are {66d575e28c61=0} racks are {/default-rack=0} 2024-12-15T01:37:01,847 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-15T01:37:01,847 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-15T01:37:01,847 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-15T01:37:01,847 INFO [PEWorker-1 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-15T01:37:01,847 INFO [PEWorker-1 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-15T01:37:01,847 INFO [PEWorker-1 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-15T01:37:01,847 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-15T01:37:01,847 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=199, ppid=198, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=0df583d2bd60c0fb93df75ab79af75ab, ASSIGN}, {pid=200, ppid=198, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, 
region=1d4794c035c3c87db54e27f1a3f30bba, ASSIGN}] 2024-12-15T01:37:01,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=198 2024-12-15T01:37:01,848 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=200, ppid=198, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=1d4794c035c3c87db54e27f1a3f30bba, ASSIGN 2024-12-15T01:37:01,849 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=199, ppid=198, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=0df583d2bd60c0fb93df75ab79af75ab, ASSIGN 2024-12-15T01:37:01,849 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=200, ppid=198, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=1d4794c035c3c87db54e27f1a3f30bba, ASSIGN; state=OFFLINE, location=66d575e28c61,42663,1734226422715; forceNewPlan=false, retain=false 2024-12-15T01:37:01,849 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=199, ppid=198, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=0df583d2bd60c0fb93df75ab79af75ab, ASSIGN; state=OFFLINE, location=66d575e28c61,39381,1734226422872; forceNewPlan=false, retain=false 2024-12-15T01:37:02,000 INFO [66d575e28c61:40789 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-15T01:37:02,000 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=200 updating hbase:meta row=1d4794c035c3c87db54e27f1a3f30bba, regionState=OPENING, regionLocation=66d575e28c61,42663,1734226422715 2024-12-15T01:37:02,000 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=199 updating hbase:meta row=0df583d2bd60c0fb93df75ab79af75ab, regionState=OPENING, regionLocation=66d575e28c61,39381,1734226422872 2024-12-15T01:37:02,001 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=201, ppid=200, state=RUNNABLE; OpenRegionProcedure 1d4794c035c3c87db54e27f1a3f30bba, server=66d575e28c61,42663,1734226422715}] 2024-12-15T01:37:02,002 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=202, ppid=199, state=RUNNABLE; OpenRegionProcedure 0df583d2bd60c0fb93df75ab79af75ab, server=66d575e28c61,39381,1734226422872}] 2024-12-15T01:37:02,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=198 2024-12-15T01:37:02,153 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 66d575e28c61,42663,1734226422715 2024-12-15T01:37:02,154 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 66d575e28c61,39381,1734226422872 2024-12-15T01:37:02,157 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemStateWithSkipTmp,1,1734226621542.1d4794c035c3c87db54e27f1a3f30bba. 
2024-12-15T01:37:02,157 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemStateWithSkipTmp,,1734226621542.0df583d2bd60c0fb93df75ab79af75ab. 2024-12-15T01:37:02,157 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(7285): Opening region: {ENCODED => 0df583d2bd60c0fb93df75ab79af75ab, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1734226621542.0df583d2bd60c0fb93df75ab79af75ab.', STARTKEY => '', ENDKEY => '1'} 2024-12-15T01:37:02,157 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(7285): Opening region: {ENCODED => 1d4794c035c3c87db54e27f1a3f30bba, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1734226621542.1d4794c035c3c87db54e27f1a3f30bba.', STARTKEY => '1', ENDKEY => ''} 2024-12-15T01:37:02,157 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSkipTmp,,1734226621542.0df583d2bd60c0fb93df75ab79af75ab. service=AccessControlService 2024-12-15T01:37:02,157 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSkipTmp,1,1734226621542.1d4794c035c3c87db54e27f1a3f30bba. service=AccessControlService 2024-12-15T01:37:02,157 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-15T01:37:02,157 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
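The open path above registers the AccessControlService on both regions and loads org.apache.hadoop.hbase.security.access.AccessController as a system coprocessor. As a hedged illustration only, the sketch below shows the standard configuration keys that cause that coprocessor to be installed cluster-wide; whether this particular test harness sets them exactly this way is not visible in the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class AccessControllerConfSketch {
  // Returns a Configuration with the usual keys for enabling the AccessController
  // coprocessor (the AccessControlService registered in the entries above).
  static Configuration secureConf() {
    Configuration conf = HBaseConfiguration.create();
    String ac = "org.apache.hadoop.hbase.security.access.AccessController";
    conf.set("hbase.coprocessor.master.classes", ac);
    conf.set("hbase.coprocessor.region.classes", ac);
    conf.set("hbase.coprocessor.regionserver.classes", ac);
    conf.setBoolean("hbase.security.authorization", true);
    return conf;
  }
}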
2024-12-15T01:37:02,158 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSkipTmp 1d4794c035c3c87db54e27f1a3f30bba 2024-12-15T01:37:02,158 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSkipTmp 0df583d2bd60c0fb93df75ab79af75ab 2024-12-15T01:37:02,158 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithSkipTmp,1,1734226621542.1d4794c035c3c87db54e27f1a3f30bba.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T01:37:02,158 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithSkipTmp,,1734226621542.0df583d2bd60c0fb93df75ab79af75ab.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T01:37:02,158 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(7327): checking encryption for 0df583d2bd60c0fb93df75ab79af75ab 2024-12-15T01:37:02,158 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(7327): checking encryption for 1d4794c035c3c87db54e27f1a3f30bba 2024-12-15T01:37:02,158 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(7330): checking classloading for 0df583d2bd60c0fb93df75ab79af75ab 2024-12-15T01:37:02,158 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(7330): checking classloading for 1d4794c035c3c87db54e27f1a3f30bba 2024-12-15T01:37:02,159 INFO [StoreOpener-1d4794c035c3c87db54e27f1a3f30bba-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 1d4794c035c3c87db54e27f1a3f30bba 2024-12-15T01:37:02,159 INFO [StoreOpener-0df583d2bd60c0fb93df75ab79af75ab-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 0df583d2bd60c0fb93df75ab79af75ab 2024-12-15T01:37:02,160 INFO [StoreOpener-1d4794c035c3c87db54e27f1a3f30bba-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1d4794c035c3c87db54e27f1a3f30bba columnFamilyName cf 2024-12-15T01:37:02,160 INFO [StoreOpener-0df583d2bd60c0fb93df75ab79af75ab-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0df583d2bd60c0fb93df75ab79af75ab columnFamilyName cf 2024-12-15T01:37:02,160 DEBUG [StoreOpener-1d4794c035c3c87db54e27f1a3f30bba-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T01:37:02,160 DEBUG [StoreOpener-0df583d2bd60c0fb93df75ab79af75ab-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T01:37:02,161 INFO [StoreOpener-1d4794c035c3c87db54e27f1a3f30bba-1 {}] regionserver.HStore(327): Store=1d4794c035c3c87db54e27f1a3f30bba/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-15T01:37:02,161 INFO [StoreOpener-0df583d2bd60c0fb93df75ab79af75ab-1 {}] regionserver.HStore(327): Store=0df583d2bd60c0fb93df75ab79af75ab/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-15T01:37:02,162 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithSkipTmp/0df583d2bd60c0fb93df75ab79af75ab 2024-12-15T01:37:02,162 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithSkipTmp/1d4794c035c3c87db54e27f1a3f30bba 2024-12-15T01:37:02,162 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithSkipTmp/0df583d2bd60c0fb93df75ab79af75ab 2024-12-15T01:37:02,162 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithSkipTmp/1d4794c035c3c87db54e27f1a3f30bba 2024-12-15T01:37:02,163 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] 
regionserver.HRegion(1085): writing seq id for 0df583d2bd60c0fb93df75ab79af75ab 2024-12-15T01:37:02,164 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(1085): writing seq id for 1d4794c035c3c87db54e27f1a3f30bba 2024-12-15T01:37:02,165 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithSkipTmp/0df583d2bd60c0fb93df75ab79af75ab/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-15T01:37:02,165 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithSkipTmp/1d4794c035c3c87db54e27f1a3f30bba/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-15T01:37:02,165 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(1102): Opened 0df583d2bd60c0fb93df75ab79af75ab; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66542013, jitterRate=-0.008446738123893738}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-15T01:37:02,165 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(1102): Opened 1d4794c035c3c87db54e27f1a3f30bba; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72603535, jitterRate=0.0818769782781601}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-15T01:37:02,166 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(1001): Region open journal for 0df583d2bd60c0fb93df75ab79af75ab: 2024-12-15T01:37:02,166 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(1001): Region open journal for 1d4794c035c3c87db54e27f1a3f30bba: 2024-12-15T01:37:02,167 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemStateWithSkipTmp,,1734226621542.0df583d2bd60c0fb93df75ab79af75ab., pid=202, masterSystemTime=1734226622154 2024-12-15T01:37:02,167 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemStateWithSkipTmp,1,1734226621542.1d4794c035c3c87db54e27f1a3f30bba., pid=201, masterSystemTime=1734226622153 2024-12-15T01:37:02,168 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemStateWithSkipTmp,1,1734226621542.1d4794c035c3c87db54e27f1a3f30bba. 2024-12-15T01:37:02,168 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemStateWithSkipTmp,1,1734226621542.1d4794c035c3c87db54e27f1a3f30bba. 
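The two "Opened ... next sequenceid=2" entries above print different desiredMaxFileSize values (66542013 and 72603535) because ConstantSizeRegionSplitPolicy applies a per-region random jitter to the configured maximum file size. Assuming a 64 MB base (67108864 bytes, which is consistent with both numbers but is not stated in the log), the printed values can be reproduced from the logged jitterRate values with the small check below.

public class SplitJitterCheck {
  public static void main(String[] args) {
    long base = 64L * 1024 * 1024;  // 67108864 bytes, assumed base max file size
    double[] jitterRates = { -0.008446738123893738, 0.0818769782781601 };  // from the entries above
    for (double rate : jitterRates) {
      long desired = base + (long) (base * rate);  // jittered desiredMaxFileSize
      System.out.println("jitterRate=" + rate + " -> desiredMaxFileSize=" + desired);
    }
    // Reproduces the logged 66542013 and 72603535 (base plus the truncated jitter term).
  }
}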
2024-12-15T01:37:02,168 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=200 updating hbase:meta row=1d4794c035c3c87db54e27f1a3f30bba, regionState=OPEN, openSeqNum=2, regionLocation=66d575e28c61,42663,1734226422715 2024-12-15T01:37:02,168 DEBUG [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemStateWithSkipTmp,,1734226621542.0df583d2bd60c0fb93df75ab79af75ab. 2024-12-15T01:37:02,168 INFO [RS_OPEN_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemStateWithSkipTmp,,1734226621542.0df583d2bd60c0fb93df75ab79af75ab. 2024-12-15T01:37:02,169 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=199 updating hbase:meta row=0df583d2bd60c0fb93df75ab79af75ab, regionState=OPEN, openSeqNum=2, regionLocation=66d575e28c61,39381,1734226422872 2024-12-15T01:37:02,170 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=201, resume processing ppid=200 2024-12-15T01:37:02,171 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=201, ppid=200, state=SUCCESS; OpenRegionProcedure 1d4794c035c3c87db54e27f1a3f30bba, server=66d575e28c61,42663,1734226422715 in 168 msec 2024-12-15T01:37:02,171 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=202, resume processing ppid=199 2024-12-15T01:37:02,171 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=202, ppid=199, state=SUCCESS; OpenRegionProcedure 0df583d2bd60c0fb93df75ab79af75ab, server=66d575e28c61,39381,1734226422872 in 168 msec 2024-12-15T01:37:02,171 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=200, ppid=198, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=1d4794c035c3c87db54e27f1a3f30bba, ASSIGN in 323 msec 2024-12-15T01:37:02,172 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=199, resume processing ppid=198 2024-12-15T01:37:02,172 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=199, ppid=198, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=0df583d2bd60c0fb93df75ab79af75ab, ASSIGN in 324 msec 2024-12-15T01:37:02,173 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=198, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-15T01:37:02,173 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734226622173"}]},"ts":"1734226622173"} 2024-12-15T01:37:02,174 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=ENABLED in hbase:meta 2024-12-15T01:37:02,194 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=198, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_POST_OPERATION 2024-12-15T01:37:02,195 DEBUG [PEWorker-4 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportFileSystemStateWithSkipTmp jenkins: RWXCA 2024-12-15T01:37:02,196 DEBUG 
[RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35281 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-12-15T01:37:02,227 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:37:02,227 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42663-0x1002752410c0001, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:37:02,227 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39381-0x1002752410c0003, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:37:02,227 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35281-0x1002752410c0002, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:37:02,246 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-15T01:37:02,246 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-15T01:37:02,246 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-15T01:37:02,247 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-15T01:37:02,247 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-15T01:37:02,247 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-15T01:37:02,247 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-15T01:37:02,247 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data 
PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-15T01:37:02,248 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=198, state=SUCCESS; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 702 msec 2024-12-15T01:37:02,293 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp 2024-12-15T01:37:02,293 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp Metrics about Tables on a single HBase RegionServer 2024-12-15T01:37:02,294 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum 2024-12-15T01:37:02,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=198 2024-12-15T01:37:02,651 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp, procId: 198 completed 2024-12-15T01:37:02,651 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testExportFileSystemStateWithSkipTmp get assigned. Timeout = 60000ms 2024-12-15T01:37:02,652 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-15T01:37:02,657 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42663 {}] regionserver.StoreScanner(1133): Switch to stream read (scanned=32795 bytes) of info 2024-12-15T01:37:02,662 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testExportFileSystemStateWithSkipTmp assigned to meta. Checking AM states. 2024-12-15T01:37:02,662 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-15T01:37:02,662 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testExportFileSystemStateWithSkipTmp assigned. 2024-12-15T01:37:02,664 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-15T01:37:02,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734226622664 (current time:1734226622664). 
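The request at the end of the entries above asks for a FLUSH-type snapshot named emptySnaptb0-testExportFileSystemStateWithSkipTmp, issued right after the client has confirmed that every region of the table is assigned. A hedged sketch of the corresponding client calls with the HBase 2.x Admin API follows; the polling loop and class name are illustrative, and the test itself goes through HBaseTestingUtility helpers rather than this exact code.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class EmptySnapshotSketch {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // The log shows the test waiting until all regions are assigned before snapshotting;
      // isTableAvailable is the closest public client-side check.
      while (!admin.isTableAvailable(table)) {
        Thread.sleep(100);
      }
      // Matches "{ ss=emptySnaptb0-... type=FLUSH ttl=0 }" in the request above.
      admin.snapshot("emptySnaptb0-testExportFileSystemStateWithSkipTmp", table, SnapshotType.FLUSH);
    }
  }
}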
2024-12-15T01:37:02,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-15T01:37:02,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp VERSION not specified, setting to 2 2024-12-15T01:37:02,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-15T01:37:02,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x61cb8179 to 127.0.0.1:64248 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@48dd3bf8 2024-12-15T01:37:02,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@32c011d8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T01:37:02,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T01:37:02,706 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50456, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T01:37:02,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x61cb8179 to 127.0.0.1:64248 2024-12-15T01:37:02,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T01:37:02,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x50343eb1 to 127.0.0.1:64248 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7117ddb4 2024-12-15T01:37:02,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@43eb5c9f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T01:37:02,730 DEBUG [hconnection-0x63d3a6b0-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T01:37:02,732 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50470, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T01:37:02,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T01:37:02,735 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59150, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T01:37:02,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(407): Close 
zookeeper connection 0x50343eb1 to 127.0.0.1:64248 2024-12-15T01:37:02,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T01:37:02,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-12-15T01:37:02,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-15T01:37:02,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] procedure2.ProcedureExecutor(1098): Stored pid=203, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-15T01:37:02,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 203 2024-12-15T01:37:02,741 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-15T01:37:02,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=203 2024-12-15T01:37:02,742 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-15T01:37:02,745 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-15T01:37:02,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742329_1505 (size=203) 2024-12-15T01:37:02,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742329_1505 (size=203) 2024-12-15T01:37:02,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742329_1505 (size=203) 2024-12-15T01:37:02,751 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-15T01:37:02,751 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): 
Initialized subprocedures=[{pid=204, ppid=203, state=RUNNABLE; SnapshotRegionProcedure 0df583d2bd60c0fb93df75ab79af75ab}, {pid=205, ppid=203, state=RUNNABLE; SnapshotRegionProcedure 1d4794c035c3c87db54e27f1a3f30bba}] 2024-12-15T01:37:02,752 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=204, ppid=203, state=RUNNABLE; SnapshotRegionProcedure 0df583d2bd60c0fb93df75ab79af75ab 2024-12-15T01:37:02,752 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=205, ppid=203, state=RUNNABLE; SnapshotRegionProcedure 1d4794c035c3c87db54e27f1a3f30bba 2024-12-15T01:37:02,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=203 2024-12-15T01:37:02,890 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithSkipTmp' 2024-12-15T01:37:02,903 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 66d575e28c61,42663,1734226422715 2024-12-15T01:37:02,903 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 66d575e28c61,39381,1734226422872 2024-12-15T01:37:02,903 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39381 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=204 2024-12-15T01:37:02,903 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1734226621542.0df583d2bd60c0fb93df75ab79af75ab. 2024-12-15T01:37:02,903 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42663 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=205 2024-12-15T01:37:02,903 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1734226621542.1d4794c035c3c87db54e27f1a3f30bba. 2024-12-15T01:37:02,903 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HRegion(2538): Flush status journal for 0df583d2bd60c0fb93df75ab79af75ab: 2024-12-15T01:37:02,904 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegion(2538): Flush status journal for 1d4794c035c3c87db54e27f1a3f30bba: 2024-12-15T01:37:02,904 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,,1734226621542.0df583d2bd60c0fb93df75ab79af75ab. for emptySnaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-15T01:37:02,904 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,1,1734226621542.1d4794c035c3c87db54e27f1a3f30bba. for emptySnaptb0-testExportFileSystemStateWithSkipTmp completed. 
2024-12-15T01:37:02,904 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,,1734226621542.0df583d2bd60c0fb93df75ab79af75ab.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-15T01:37:02,904 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,1,1734226621542.1d4794c035c3c87db54e27f1a3f30bba.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-15T01:37:02,904 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-15T01:37:02,904 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-15T01:37:02,904 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-15T01:37:02,904 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-15T01:37:02,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742330_1506 (size=82) 2024-12-15T01:37:02,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742330_1506 (size=82) 2024-12-15T01:37:02,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742331_1507 (size=82) 2024-12-15T01:37:02,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742330_1506 (size=82) 2024-12-15T01:37:02,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742331_1507 (size=82) 2024-12-15T01:37:02,916 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1734226621542.1d4794c035c3c87db54e27f1a3f30bba. 2024-12-15T01:37:02,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742331_1507 (size=82) 2024-12-15T01:37:02,916 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=205 2024-12-15T01:37:02,916 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1734226621542.0df583d2bd60c0fb93df75ab79af75ab. 
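Because nothing has been flushed yet, both SnapshotRegionProcedure tasks above store only region-info and reference "[] hfiles" for the empty snapshot. Once SNAPSHOT_COMPLETE_SNAPSHOT moves the manifest out of .hbase-snapshot/.tmp, the result is visible to clients; a small, illustrative listing sketch (class name assumed) is shown below.

import java.util.List;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public class ListSnapshotsSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Lists completed snapshots; emptySnaptb0-testExportFileSystemStateWithSkipTmp
      // should appear after the procedure above finishes.
      List<SnapshotDescription> snapshots = admin.listSnapshots();
      for (SnapshotDescription snapshot : snapshots) {
        System.out.println(snapshot.getName());
      }
    }
  }
}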
2024-12-15T01:37:02,916 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=204 2024-12-15T01:37:02,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] master.HMaster(4106): Remote procedure done, pid=205 2024-12-15T01:37:02,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.HMaster(4106): Remote procedure done, pid=204 2024-12-15T01:37:02,916 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp on region 1d4794c035c3c87db54e27f1a3f30bba 2024-12-15T01:37:02,916 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp on region 0df583d2bd60c0fb93df75ab79af75ab 2024-12-15T01:37:02,916 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=205, ppid=203, state=RUNNABLE; SnapshotRegionProcedure 1d4794c035c3c87db54e27f1a3f30bba 2024-12-15T01:37:02,916 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=204, ppid=203, state=RUNNABLE; SnapshotRegionProcedure 0df583d2bd60c0fb93df75ab79af75ab 2024-12-15T01:37:02,918 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=205, ppid=203, state=SUCCESS; SnapshotRegionProcedure 1d4794c035c3c87db54e27f1a3f30bba in 166 msec 2024-12-15T01:37:02,918 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=204, resume processing ppid=203 2024-12-15T01:37:02,918 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=204, ppid=203, state=SUCCESS; SnapshotRegionProcedure 0df583d2bd60c0fb93df75ab79af75ab in 166 msec 2024-12-15T01:37:02,918 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-15T01:37:02,919 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-15T01:37:02,919 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-15T01:37:02,919 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-15T01:37:02,919 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-15T01:37:02,928 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742332_1508 (size=585) 2024-12-15T01:37:02,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742332_1508 (size=585) 2024-12-15T01:37:02,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742332_1508 (size=585) 2024-12-15T01:37:02,932 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-15T01:37:02,935 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-15T01:37:02,936 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-15T01:37:02,937 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-15T01:37:02,937 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 203 2024-12-15T01:37:02,937 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=203, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } in 198 msec 2024-12-15T01:37:03,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=203 2024-12-15T01:37:03,045 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp, procId: 203 completed 2024-12-15T01:37:03,054 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39381 {}] regionserver.HRegion(8254): writing data to region testtb-testExportFileSystemStateWithSkipTmp,,1734226621542.0df583d2bd60c0fb93df75ab79af75ab. with WAL disabled. Data may be lost in the event of a crash. 2024-12-15T01:37:03,054 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42663 {}] regionserver.HRegion(8254): writing data to region testtb-testExportFileSystemStateWithSkipTmp,1,1734226621542.1d4794c035c3c87db54e27f1a3f30bba. 
with WAL disabled. Data may be lost in the event of a crash. 2024-12-15T01:37:03,058 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportFileSystemStateWithSkipTmp 2024-12-15T01:37:03,058 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportFileSystemStateWithSkipTmp,,1734226621542.0df583d2bd60c0fb93df75ab79af75ab. 2024-12-15T01:37:03,058 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-15T01:37:03,067 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-15T01:37:03,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734226623067 (current time:1734226623067). 2024-12-15T01:37:03,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-15T01:37:03,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportFileSystemStateWithSkipTmp VERSION not specified, setting to 2 2024-12-15T01:37:03,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-15T01:37:03,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x40c58be1 to 127.0.0.1:64248 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@53117195 2024-12-15T01:37:03,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1147d8e8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T01:37:03,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T01:37:03,080 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50472, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T01:37:03,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x40c58be1 to 127.0.0.1:64248 2024-12-15T01:37:03,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T01:37:03,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x357015d4 to 127.0.0.1:64248 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6d355736 2024-12-15T01:37:03,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@421a52e0, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T01:37:03,096 DEBUG [hconnection-0x2bc9d712-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T01:37:03,097 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50482, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T01:37:03,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T01:37:03,100 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59160, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T01:37:03,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x357015d4 to 127.0.0.1:64248 2024-12-15T01:37:03,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T01:37:03,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-12-15T01:37:03,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-15T01:37:03,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] procedure2.ProcedureExecutor(1098): Stored pid=206, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-15T01:37:03,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 206 2024-12-15T01:37:03,104 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-15T01:37:03,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=206 2024-12-15T01:37:03,105 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-15T01:37:03,107 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute 
state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-15T01:37:03,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742333_1509 (size=198) 2024-12-15T01:37:03,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742333_1509 (size=198) 2024-12-15T01:37:03,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742333_1509 (size=198) 2024-12-15T01:37:03,115 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-15T01:37:03,116 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=207, ppid=206, state=RUNNABLE; SnapshotRegionProcedure 0df583d2bd60c0fb93df75ab79af75ab}, {pid=208, ppid=206, state=RUNNABLE; SnapshotRegionProcedure 1d4794c035c3c87db54e27f1a3f30bba}] 2024-12-15T01:37:03,117 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=207, ppid=206, state=RUNNABLE; SnapshotRegionProcedure 0df583d2bd60c0fb93df75ab79af75ab 2024-12-15T01:37:03,117 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=208, ppid=206, state=RUNNABLE; SnapshotRegionProcedure 1d4794c035c3c87db54e27f1a3f30bba 2024-12-15T01:37:03,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=206 2024-12-15T01:37:03,268 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 66d575e28c61,39381,1734226422872 2024-12-15T01:37:03,268 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 66d575e28c61,42663,1734226422715 2024-12-15T01:37:03,268 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39381 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=207 2024-12-15T01:37:03,268 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42663 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=208 2024-12-15T01:37:03,268 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1734226621542.1d4794c035c3c87db54e27f1a3f30bba. 2024-12-15T01:37:03,268 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1734226621542.0df583d2bd60c0fb93df75ab79af75ab. 
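The entries above trace the master-side SnapshotProcedure (pid=206) for snaptb0-testExportFileSystemStateWithSkipTmp through SNAPSHOT_PREPARE, SNAPSHOT_PRE_OPERATION and SNAPSHOT_WRITE_SNAPSHOT_INFO, then fan it out into one SnapshotRegionProcedure per online region (pid=207 and pid=208), which the two region servers have just picked up as SnapshotRegionCallable executions. For orientation, a minimal client-side sketch of how such a type=FLUSH snapshot is requested; it assumes the HBase 2.x Admin API (Admin.snapshot with a SnapshotDescription and SnapshotType.FLUSH) and only reuses the snapshot and table names from the log, so it illustrates the mechanism rather than reproducing the test's own code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeFlushSnapshot {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // type=FLUSH, as in the request logged above: online regions are flushed
      // first so the snapshot can reference the resulting store files.
      admin.snapshot(new SnapshotDescription(
          "snaptb0-testExportFileSystemStateWithSkipTmp",
          TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"),
          SnapshotType.FLUSH));
    }
  }
}

The call blocks until the procedure reaches SUCCESS, which corresponds to the "Operation: SNAPSHOT ... procId: 206 completed" entry further down.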
2024-12-15T01:37:03,269 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.HRegion(2837): Flushing 0df583d2bd60c0fb93df75ab79af75ab 1/1 column families, dataSize=400 B heapSize=1.09 KB 2024-12-15T01:37:03,269 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.HRegion(2837): Flushing 1d4794c035c3c87db54e27f1a3f30bba 1/1 column families, dataSize=2.87 KB heapSize=6.44 KB 2024-12-15T01:37:03,284 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithSkipTmp/1d4794c035c3c87db54e27f1a3f30bba/.tmp/cf/86b9839d345b4a8399eddce803861a93 is 71, key is 180ee74bb6d894da79fd95934773e58f/cf:q/1734226623054/Put/seqid=0 2024-12-15T01:37:03,284 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithSkipTmp/0df583d2bd60c0fb93df75ab79af75ab/.tmp/cf/b015a1c22bb241a3a77bec0221d47683 is 71, key is 0242458fb0cf102ad71b26d60e2b851c/cf:q/1734226623053/Put/seqid=0 2024-12-15T01:37:03,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742335_1511 (size=5490) 2024-12-15T01:37:03,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742335_1511 (size=5490) 2024-12-15T01:37:03,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742334_1510 (size=8120) 2024-12-15T01:37:03,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742334_1510 (size=8120) 2024-12-15T01:37:03,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742334_1510 (size=8120) 2024-12-15T01:37:03,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742335_1511 (size=5490) 2024-12-15T01:37:03,290 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=400 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithSkipTmp/0df583d2bd60c0fb93df75ab79af75ab/.tmp/cf/b015a1c22bb241a3a77bec0221d47683 2024-12-15T01:37:03,290 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.87 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithSkipTmp/1d4794c035c3c87db54e27f1a3f30bba/.tmp/cf/86b9839d345b4a8399eddce803861a93 2024-12-15T01:37:03,294 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithSkipTmp/0df583d2bd60c0fb93df75ab79af75ab/.tmp/cf/b015a1c22bb241a3a77bec0221d47683 as hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithSkipTmp/0df583d2bd60c0fb93df75ab79af75ab/cf/b015a1c22bb241a3a77bec0221d47683 2024-12-15T01:37:03,294 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithSkipTmp/1d4794c035c3c87db54e27f1a3f30bba/.tmp/cf/86b9839d345b4a8399eddce803861a93 as hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithSkipTmp/1d4794c035c3c87db54e27f1a3f30bba/cf/86b9839d345b4a8399eddce803861a93 2024-12-15T01:37:03,297 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithSkipTmp/0df583d2bd60c0fb93df75ab79af75ab/cf/b015a1c22bb241a3a77bec0221d47683, entries=6, sequenceid=6, filesize=5.4 K 2024-12-15T01:37:03,297 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithSkipTmp/1d4794c035c3c87db54e27f1a3f30bba/cf/86b9839d345b4a8399eddce803861a93, entries=44, sequenceid=6, filesize=7.9 K 2024-12-15T01:37:03,298 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.HRegion(3040): Finished flush of dataSize ~2.87 KB/2936, heapSize ~6.42 KB/6576, currentSize=0 B/0 for 1d4794c035c3c87db54e27f1a3f30bba in 29ms, sequenceid=6, compaction requested=false 2024-12-15T01:37:03,298 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.HRegion(3040): Finished flush of dataSize ~400 B/400, heapSize ~1.08 KB/1104, currentSize=0 B/0 for 0df583d2bd60c0fb93df75ab79af75ab in 29ms, sequenceid=6, compaction requested=false 2024-12-15T01:37:03,298 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.HRegion(2538): Flush status journal for 0df583d2bd60c0fb93df75ab79af75ab: 2024-12-15T01:37:03,298 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.HRegion(2538): Flush status journal for 1d4794c035c3c87db54e27f1a3f30bba: 2024-12-15T01:37:03,298 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,1,1734226621542.1d4794c035c3c87db54e27f1a3f30bba. for snaptb0-testExportFileSystemStateWithSkipTmp completed. 
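Because the snapshot is type=FLUSH, each region first flushes its memstore: DefaultStoreFlusher writes a new HFile under <region>/.tmp/cf/, HRegionFileSystem commits it into <region>/cf/, and HStore then reports the committed file with its entry count and size (6 entries / 5.4 K for 0df583d2bd60c0fb93df75ab79af75ab, 44 entries / 7.9 K for 1d4794c035c3c87db54e27f1a3f30bba). The same flush can also be forced on its own; a small sketch, again assuming the HBase 2.x Admin API and reusing the table name from the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ForceFlush {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Flushes every region of the table: memstore contents become a new HFile
      // under <region>/.tmp/cf and are then committed into <region>/cf, the same
      // two-step sequence the entries above record.
      admin.flush(TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"));
    }
  }
}

Once flushed, the data sits in immutable store files, which is why the snapshot can simply reference them (see the "Adding snapshot references" entries below) instead of copying any data.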
2024-12-15T01:37:03,298 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,,1734226621542.0df583d2bd60c0fb93df75ab79af75ab. for snaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-15T01:37:03,298 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,1,1734226621542.1d4794c035c3c87db54e27f1a3f30bba.' region-info for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-15T01:37:03,298 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,,1734226621542.0df583d2bd60c0fb93df75ab79af75ab.' region-info for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-15T01:37:03,298 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-15T01:37:03,298 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-15T01:37:03,298 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithSkipTmp/0df583d2bd60c0fb93df75ab79af75ab/cf/b015a1c22bb241a3a77bec0221d47683] hfiles 2024-12-15T01:37:03,298 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithSkipTmp/1d4794c035c3c87db54e27f1a3f30bba/cf/86b9839d345b4a8399eddce803861a93] hfiles 2024-12-15T01:37:03,298 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithSkipTmp/0df583d2bd60c0fb93df75ab79af75ab/cf/b015a1c22bb241a3a77bec0221d47683 for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-15T01:37:03,298 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithSkipTmp/1d4794c035c3c87db54e27f1a3f30bba/cf/86b9839d345b4a8399eddce803861a93 for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-15T01:37:03,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742337_1513 (size=121) 2024-12-15T01:37:03,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742337_1513 (size=121) 2024-12-15T01:37:03,307 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742336_1512 (size=121) 2024-12-15T01:37:03,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742337_1513 (size=121) 2024-12-15T01:37:03,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742336_1512 (size=121) 2024-12-15T01:37:03,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742336_1512 (size=121) 2024-12-15T01:37:03,307 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1734226621542.1d4794c035c3c87db54e27f1a3f30bba. 2024-12-15T01:37:03,307 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=208 2024-12-15T01:37:03,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.HMaster(4106): Remote procedure done, pid=208 2024-12-15T01:37:03,308 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSkipTmp on region 1d4794c035c3c87db54e27f1a3f30bba 2024-12-15T01:37:03,308 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1734226621542.0df583d2bd60c0fb93df75ab79af75ab. 
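With both region manifests written (the two 121-byte blocks above) and the region callables finishing up, the snapshot becomes visible to clients once the master consolidates and verifies it below. A brief sketch of inspecting it afterwards, assuming the HBase 2.x Admin API; the creation-time default noted earlier ("Creation time not specified, setting to ...") is what getCreationTime() returns.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public class ListCompletedSnapshots {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Only completed snapshots are returned; a snapshot procedure that is still
      // running, like pid=206 at this point in the log, does not appear yet.
      for (SnapshotDescription sd : admin.listSnapshots()) {
        System.out.println(sd.getName() + " on " + sd.getTableName()
            + " created at " + sd.getCreationTime());
      }
    }
  }
}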
2024-12-15T01:37:03,308 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=208, ppid=206, state=RUNNABLE; SnapshotRegionProcedure 1d4794c035c3c87db54e27f1a3f30bba 2024-12-15T01:37:03,308 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/66d575e28c61:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=207 2024-12-15T01:37:03,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.HMaster(4106): Remote procedure done, pid=207 2024-12-15T01:37:03,308 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSkipTmp on region 0df583d2bd60c0fb93df75ab79af75ab 2024-12-15T01:37:03,308 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=207, ppid=206, state=RUNNABLE; SnapshotRegionProcedure 0df583d2bd60c0fb93df75ab79af75ab 2024-12-15T01:37:03,309 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=208, ppid=206, state=SUCCESS; SnapshotRegionProcedure 1d4794c035c3c87db54e27f1a3f30bba in 193 msec 2024-12-15T01:37:03,309 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=207, resume processing ppid=206 2024-12-15T01:37:03,309 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=207, ppid=206, state=SUCCESS; SnapshotRegionProcedure 0df583d2bd60c0fb93df75ab79af75ab in 193 msec 2024-12-15T01:37:03,309 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-15T01:37:03,310 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-15T01:37:03,310 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-15T01:37:03,310 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-15T01:37:03,310 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-15T01:37:03,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742338_1514 (size=663) 2024-12-15T01:37:03,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742338_1514 (size=663) 2024-12-15T01:37:03,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:46345 is added to blk_1073742338_1514 (size=663) 2024-12-15T01:37:03,318 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-15T01:37:03,322 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-15T01:37:03,322 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-15T01:37:03,323 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-15T01:37:03,323 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 206 2024-12-15T01:37:03,324 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=206, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } in 220 msec 2024-12-15T01:37:03,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=206 2024-12-15T01:37:03,406 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp, procId: 206 completed 2024-12-15T01:37:03,406 INFO [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226623406 2024-12-15T01:37:03,406 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:33329, tgtDir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226623406, rawTgtDir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226623406, srcFsUri=hdfs://localhost:33329, srcDir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b 2024-12-15T01:37:03,434 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:33329, inputRoot=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b 2024-12-15T01:37:03,434 DEBUG [Time-limited 
test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-778768832_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226623406, skipTmp=true, initialOutputSnapshotDir=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226623406/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-15T01:37:03,435 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 2024-12-15T01:37:03,439 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226623406/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-15T01:37:03,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742339_1515 (size=198) 2024-12-15T01:37:03,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742339_1515 (size=198) 2024-12-15T01:37:03,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742339_1515 (size=198) 2024-12-15T01:37:03,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742340_1516 (size=663) 2024-12-15T01:37:03,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742340_1516 (size=663) 2024-12-15T01:37:03,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742340_1516 (size=663) 2024-12-15T01:37:03,602 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/hadoop-2986267689083579960.jar 2024-12-15T01:37:03,602 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-15T01:37:03,602 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-15T01:37:03,602 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-15T01:37:04,404 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/hadoop-415045625971215005.jar 2024-12-15T01:37:04,404 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-15T01:37:04,404 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-15T01:37:04,458 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/hadoop-6129579595711096136.jar 2024-12-15T01:37:04,459 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-15T01:37:04,459 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-15T01:37:04,459 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-15T01:37:04,459 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-15T01:37:04,460 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-15T01:37:04,460 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-15T01:37:04,460 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-15T01:37:04,460 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-15T01:37:04,460 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, 
using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-15T01:37:04,461 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-15T01:37:04,461 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-15T01:37:04,461 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-15T01:37:04,461 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-15T01:37:04,461 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-15T01:37:04,461 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-15T01:37:04,462 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-15T01:37:04,462 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-15T01:37:04,462 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-15T01:37:04,462 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-15T01:37:04,462 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-15T01:37:04,463 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class 
org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-15T01:37:04,463 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-15T01:37:04,463 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-15T01:37:04,463 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-15T01:37:04,463 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-15T01:37:04,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742341_1517 (size=127628) 2024-12-15T01:37:04,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742341_1517 (size=127628) 2024-12-15T01:37:04,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742341_1517 (size=127628) 2024-12-15T01:37:04,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742342_1518 (size=2172137) 2024-12-15T01:37:04,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742342_1518 (size=2172137) 2024-12-15T01:37:04,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742342_1518 (size=2172137) 2024-12-15T01:37:04,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742343_1519 (size=213228) 2024-12-15T01:37:04,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742343_1519 (size=213228) 2024-12-15T01:37:04,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742343_1519 (size=213228) 2024-12-15T01:37:04,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742344_1520 (size=1877034) 2024-12-15T01:37:04,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742344_1520 (size=1877034) 2024-12-15T01:37:04,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:38745 is added to blk_1073742344_1520 (size=1877034) 2024-12-15T01:37:04,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742345_1521 (size=533455) 2024-12-15T01:37:04,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742345_1521 (size=533455) 2024-12-15T01:37:04,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742345_1521 (size=533455) 2024-12-15T01:37:04,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742346_1522 (size=7280644) 2024-12-15T01:37:04,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742346_1522 (size=7280644) 2024-12-15T01:37:04,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742346_1522 (size=7280644) 2024-12-15T01:37:04,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742347_1523 (size=4188619) 2024-12-15T01:37:04,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742347_1523 (size=4188619) 2024-12-15T01:37:04,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742347_1523 (size=4188619) 2024-12-15T01:37:04,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742348_1524 (size=20406) 2024-12-15T01:37:04,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742348_1524 (size=20406) 2024-12-15T01:37:04,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742348_1524 (size=20406) 2024-12-15T01:37:04,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742349_1525 (size=75495) 2024-12-15T01:37:04,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742349_1525 (size=75495) 2024-12-15T01:37:04,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742349_1525 (size=75495) 2024-12-15T01:37:04,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742350_1526 (size=45609) 2024-12-15T01:37:04,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742350_1526 (size=45609) 2024-12-15T01:37:04,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742350_1526 (size=45609) 2024-12-15T01:37:04,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742351_1527 (size=110084) 2024-12-15T01:37:04,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742351_1527 (size=110084) 2024-12-15T01:37:04,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742351_1527 (size=110084) 2024-12-15T01:37:04,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742352_1528 (size=1323991) 2024-12-15T01:37:04,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742352_1528 (size=1323991) 2024-12-15T01:37:04,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742352_1528 (size=1323991) 2024-12-15T01:37:04,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742353_1529 (size=23076) 2024-12-15T01:37:04,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742353_1529 (size=23076) 2024-12-15T01:37:04,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742353_1529 (size=23076) 2024-12-15T01:37:04,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742354_1530 (size=126803) 2024-12-15T01:37:04,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742354_1530 (size=126803) 2024-12-15T01:37:04,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742354_1530 (size=126803) 2024-12-15T01:37:04,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742355_1531 (size=322274) 2024-12-15T01:37:04,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742355_1531 (size=322274) 2024-12-15T01:37:04,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742355_1531 (size=322274) 2024-12-15T01:37:04,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742356_1532 (size=451756) 2024-12-15T01:37:04,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742356_1532 (size=451756) 2024-12-15T01:37:04,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742356_1532 (size=451756) 2024-12-15T01:37:04,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742357_1533 (size=1832290) 2024-12-15T01:37:04,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742357_1533 (size=1832290) 2024-12-15T01:37:04,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742357_1533 (size=1832290) 2024-12-15T01:37:04,662 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742358_1534 (size=30081) 2024-12-15T01:37:04,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742358_1534 (size=30081) 2024-12-15T01:37:04,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742358_1534 (size=30081) 2024-12-15T01:37:04,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742359_1535 (size=53616) 2024-12-15T01:37:04,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742359_1535 (size=53616) 2024-12-15T01:37:04,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742359_1535 (size=53616) 2024-12-15T01:37:04,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742360_1536 (size=29229) 2024-12-15T01:37:04,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742360_1536 (size=29229) 2024-12-15T01:37:04,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742360_1536 (size=29229) 2024-12-15T01:37:04,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742361_1537 (size=6350909) 2024-12-15T01:37:04,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742361_1537 (size=6350909) 2024-12-15T01:37:04,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742361_1537 (size=6350909) 2024-12-15T01:37:04,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742362_1538 (size=169089) 2024-12-15T01:37:04,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742362_1538 (size=169089) 2024-12-15T01:37:04,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742362_1538 (size=169089) 2024-12-15T01:37:04,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742363_1539 (size=912094) 2024-12-15T01:37:04,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742363_1539 (size=912094) 2024-12-15T01:37:04,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742363_1539 (size=912094) 2024-12-15T01:37:04,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742364_1540 (size=5175431) 2024-12-15T01:37:04,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742364_1540 (size=5175431) 2024-12-15T01:37:04,727 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742364_1540 (size=5175431) 2024-12-15T01:37:04,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742365_1541 (size=136454) 2024-12-15T01:37:04,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742365_1541 (size=136454) 2024-12-15T01:37:04,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742365_1541 (size=136454) 2024-12-15T01:37:04,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742366_1542 (size=3317408) 2024-12-15T01:37:04,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742366_1542 (size=3317408) 2024-12-15T01:37:04,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742366_1542 (size=3317408) 2024-12-15T01:37:04,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742367_1543 (size=503880) 2024-12-15T01:37:04,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742367_1543 (size=503880) 2024-12-15T01:37:04,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742367_1543 (size=503880) 2024-12-15T01:37:04,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742368_1544 (size=4695811) 2024-12-15T01:37:04,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742368_1544 (size=4695811) 2024-12-15T01:37:04,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742368_1544 (size=4695811) 2024-12-15T01:37:04,766 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
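The long run of TableMapReduceUtil entries and the large addStoredBlock entries above are the export job resolving its dependency classes to jars and staging those jars in HDFS before submission; the "No job jar file set" warning is the usual JobResourceUploader notice when a job is submitted from classes already on the classpath, as in this mini-cluster test, rather than from a packaged job jar. The export itself (skipTmp=true, per the earlier outputRoot entry) can be driven programmatically roughly as below; -snapshot and -copy-to are standard ExportSnapshot arguments, while the snapshot.export.skip.tmp property name is quoted from memory and worth double-checking against the HBase version in use.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class RunExport {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Skip the intermediate .tmp staging directory on the destination, matching
    // the skipTmp=true behaviour in the log (property name assumed, verify it).
    conf.setBoolean("snapshot.export.skip.tmp", true);
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportFileSystemStateWithSkipTmp",
        "-copy-to", "hdfs://localhost:33329/user/jenkins/test-data/"
            + "94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226623406"
    });
    System.exit(rc);
  }
}

The same arguments can be passed to ExportSnapshot on the command line through the hbase launcher script; the MapReduce job that follows in the log is what this invocation submits.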
2024-12-15T01:37:04,767 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testExportFileSystemStateWithSkipTmp' hfile list 2024-12-15T01:37:04,768 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=13.3 K 2024-12-15T01:37:04,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742369_1545 (size=366) 2024-12-15T01:37:04,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742369_1545 (size=366) 2024-12-15T01:37:04,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742369_1545 (size=366) 2024-12-15T01:37:04,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742370_1546 (size=15) 2024-12-15T01:37:04,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742370_1546 (size=15) 2024-12-15T01:37:04,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742370_1546 (size=15) 2024-12-15T01:37:04,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742371_1547 (size=305099) 2024-12-15T01:37:04,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742371_1547 (size=305099) 2024-12-15T01:37:04,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742371_1547 (size=305099) 2024-12-15T01:37:05,905 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-15T01:37:05,905 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-15T01:37:05,907 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734226430009_0009_000001 (auth:SIMPLE) from 127.0.0.1:36848 2024-12-15T01:37:05,916 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/MiniMRCluster_1118476571/yarn-659708074/MiniMRCluster_1118476571-localDir-nm-1_3/usercache/jenkins/appcache/application_1734226430009_0009/container_1734226430009_0009_01_000001/launch_container.sh] 2024-12-15T01:37:05,916 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/MiniMRCluster_1118476571/yarn-659708074/MiniMRCluster_1118476571-localDir-nm-1_3/usercache/jenkins/appcache/application_1734226430009_0009/container_1734226430009_0009_01_000001/container_tokens] 2024-12-15T01:37:05,916 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/MiniMRCluster_1118476571/yarn-659708074/MiniMRCluster_1118476571-localDir-nm-1_3/usercache/jenkins/appcache/application_1734226430009_0009/container_1734226430009_0009_01_000001/sysfs] 2024-12-15T01:37:06,778 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734226430009_0010_000001 (auth:SIMPLE) from 127.0.0.1:60514 2024-12-15T01:37:06,802 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-15T01:37:10,819 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
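The two AbstractLeafQueue warnings come from the capacity scheduler inside the MiniMRCluster: the queue's application-master share is smaller than a single AM, so the scheduler skips the limit to let one application start, exactly as the message says. The surrounding DefaultContainerExecutor "delete returned false" and FsDatasetAsyncDiskServiceFixer messages are likewise benign test-environment noise (the latter refers to HBASE-27595, as its text states). If the AM-share warning ever needed fixing, the relevant knob is yarn.scheduler.capacity.maximum-am-resource-percent (default 0.1); a tiny illustration of raising it, offered only as a sketch since the property normally lives in capacity-scheduler.xml rather than in code.

import org.apache.hadoop.conf.Configuration;

public class RaiseAmShare {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Allow up to half of the queue's resources to be used by application
    // masters instead of the 10% default, which is too little for tiny queues.
    conf.setFloat("yarn.scheduler.capacity.maximum-am-resource-percent", 0.5f);
    System.out.println(conf.get("yarn.scheduler.capacity.maximum-am-resource-percent"));
  }
}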
2024-12-15T01:37:11,695 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734226430009_0010_000001 (auth:SIMPLE) from 127.0.0.1:33186 2024-12-15T01:37:11,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742372_1548 (size=350797) 2024-12-15T01:37:11,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742372_1548 (size=350797) 2024-12-15T01:37:11,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742372_1548 (size=350797) 2024-12-15T01:37:13,896 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734226430009_0010_000001 (auth:SIMPLE) from 127.0.0.1:46222 2024-12-15T01:37:16,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742373_1549 (size=8120) 2024-12-15T01:37:16,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742373_1549 (size=8120) 2024-12-15T01:37:16,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742373_1549 (size=8120) 2024-12-15T01:37:16,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742374_1550 (size=5490) 2024-12-15T01:37:16,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742374_1550 (size=5490) 2024-12-15T01:37:16,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742374_1550 (size=5490) 2024-12-15T01:37:16,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742375_1551 (size=17455) 2024-12-15T01:37:16,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742375_1551 (size=17455) 2024-12-15T01:37:16,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742375_1551 (size=17455) 2024-12-15T01:37:16,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742376_1552 (size=476) 2024-12-15T01:37:16,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742376_1552 (size=476) 2024-12-15T01:37:16,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742376_1552 (size=476) 2024-12-15T01:37:16,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742377_1553 (size=17455) 2024-12-15T01:37:16,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742377_1553 (size=17455) 2024-12-15T01:37:16,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742377_1553 (size=17455) 
2024-12-15T01:37:16,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742378_1554 (size=350797) 2024-12-15T01:37:16,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742378_1554 (size=350797) 2024-12-15T01:37:16,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742378_1554 (size=350797) 2024-12-15T01:37:16,588 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/MiniMRCluster_1118476571/yarn-659708074/MiniMRCluster_1118476571-localDir-nm-0_0/usercache/jenkins/appcache/application_1734226430009_0010/container_1734226430009_0010_01_000002/launch_container.sh] 2024-12-15T01:37:16,588 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/MiniMRCluster_1118476571/yarn-659708074/MiniMRCluster_1118476571-localDir-nm-0_0/usercache/jenkins/appcache/application_1734226430009_0010/container_1734226430009_0010_01_000002/container_tokens] 2024-12-15T01:37:16,588 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/MiniMRCluster_1118476571/yarn-659708074/MiniMRCluster_1118476571-localDir-nm-0_0/usercache/jenkins/appcache/application_1734226430009_0010/container_1734226430009_0010_01_000002/sysfs] 2024-12-15T01:37:16,599 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734226430009_0010_000001 (auth:SIMPLE) from 127.0.0.1:46236 2024-12-15T01:37:17,929 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-15T01:37:17,930 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 
2024-12-15T01:37:17,935 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-15T01:37:17,935 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-15T01:37:17,935 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-15T01:37:17,935 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-778768832_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp at hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-15T01:37:17,936 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/.snapshotinfo 2024-12-15T01:37:17,936 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/data.manifest 2024-12-15T01:37:17,936 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-778768832_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226623406/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp at hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226623406/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-15T01:37:17,936 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226623406/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/.snapshotinfo 2024-12-15T01:37:17,936 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/export-test/export-1734226623406/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/data.manifest 2024-12-15T01:37:17,941 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportFileSystemStateWithSkipTmp 2024-12-15T01:37:17,942 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithSkipTmp 2024-12-15T01:37:17,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] procedure2.ProcedureExecutor(1098): Stored pid=209, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-15T01:37:17,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=209 2024-12-15T01:37:17,944 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734226637944"}]},"ts":"1734226637944"} 2024-12-15T01:37:17,945 INFO [PEWorker-3 {}] 
hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=DISABLING in hbase:meta 2024-12-15T01:37:18,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=209 2024-12-15T01:37:18,098 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithSkipTmp to state=DISABLING 2024-12-15T01:37:18,099 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=210, ppid=209, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSkipTmp}] 2024-12-15T01:37:18,100 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=211, ppid=210, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=0df583d2bd60c0fb93df75ab79af75ab, UNASSIGN}, {pid=212, ppid=210, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=1d4794c035c3c87db54e27f1a3f30bba, UNASSIGN}] 2024-12-15T01:37:18,101 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=211, ppid=210, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=0df583d2bd60c0fb93df75ab79af75ab, UNASSIGN 2024-12-15T01:37:18,101 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=212, ppid=210, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=1d4794c035c3c87db54e27f1a3f30bba, UNASSIGN 2024-12-15T01:37:18,102 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=212 updating hbase:meta row=1d4794c035c3c87db54e27f1a3f30bba, regionState=CLOSING, regionLocation=66d575e28c61,42663,1734226422715 2024-12-15T01:37:18,102 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=211 updating hbase:meta row=0df583d2bd60c0fb93df75ab79af75ab, regionState=CLOSING, regionLocation=66d575e28c61,39381,1734226422872 2024-12-15T01:37:18,104 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-15T01:37:18,104 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=213, ppid=211, state=RUNNABLE; CloseRegionProcedure 0df583d2bd60c0fb93df75ab79af75ab, server=66d575e28c61,39381,1734226422872}] 2024-12-15T01:37:18,105 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-15T01:37:18,105 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=214, ppid=212, state=RUNNABLE; CloseRegionProcedure 1d4794c035c3c87db54e27f1a3f30bba, server=66d575e28c61,42663,1734226422715}] 2024-12-15T01:37:18,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=209 2024-12-15T01:37:18,256 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 66d575e28c61,42663,1734226422715 2024-12-15T01:37:18,256 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 66d575e28c61,39381,1734226422872 2024-12-15T01:37:18,256 INFO 
[RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] handler.UnassignRegionHandler(124): Close 1d4794c035c3c87db54e27f1a3f30bba 2024-12-15T01:37:18,256 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-15T01:37:18,256 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] handler.UnassignRegionHandler(124): Close 0df583d2bd60c0fb93df75ab79af75ab 2024-12-15T01:37:18,257 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1681): Closing 1d4794c035c3c87db54e27f1a3f30bba, disabling compactions & flushes 2024-12-15T01:37:18,257 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-15T01:37:18,257 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithSkipTmp,1,1734226621542.1d4794c035c3c87db54e27f1a3f30bba. 2024-12-15T01:37:18,257 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1734226621542.1d4794c035c3c87db54e27f1a3f30bba. 2024-12-15T01:37:18,257 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1734226621542.1d4794c035c3c87db54e27f1a3f30bba. after waiting 0 ms 2024-12-15T01:37:18,257 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,1,1734226621542.1d4794c035c3c87db54e27f1a3f30bba. 2024-12-15T01:37:18,257 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1681): Closing 0df583d2bd60c0fb93df75ab79af75ab, disabling compactions & flushes 2024-12-15T01:37:18,257 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithSkipTmp,,1734226621542.0df583d2bd60c0fb93df75ab79af75ab. 2024-12-15T01:37:18,257 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithSkipTmp,,1734226621542.0df583d2bd60c0fb93df75ab79af75ab. 2024-12-15T01:37:18,257 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,,1734226621542.0df583d2bd60c0fb93df75ab79af75ab. after waiting 0 ms 2024-12-15T01:37:18,257 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,,1734226621542.0df583d2bd60c0fb93df75ab79af75ab. 
2024-12-15T01:37:18,262 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithSkipTmp/1d4794c035c3c87db54e27f1a3f30bba/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-15T01:37:18,262 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithSkipTmp/0df583d2bd60c0fb93df75ab79af75ab/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-15T01:37:18,262 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-15T01:37:18,262 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-15T01:37:18,262 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithSkipTmp,,1734226621542.0df583d2bd60c0fb93df75ab79af75ab. 2024-12-15T01:37:18,262 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithSkipTmp,1,1734226621542.1d4794c035c3c87db54e27f1a3f30bba. 2024-12-15T01:37:18,263 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1635): Region close journal for 0df583d2bd60c0fb93df75ab79af75ab: 2024-12-15T01:37:18,263 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1635): Region close journal for 1d4794c035c3c87db54e27f1a3f30bba: 2024-12-15T01:37:18,264 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] handler.UnassignRegionHandler(170): Closed 1d4794c035c3c87db54e27f1a3f30bba 2024-12-15T01:37:18,265 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=212 updating hbase:meta row=1d4794c035c3c87db54e27f1a3f30bba, regionState=CLOSED 2024-12-15T01:37:18,265 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] handler.UnassignRegionHandler(170): Closed 0df583d2bd60c0fb93df75ab79af75ab 2024-12-15T01:37:18,265 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=211 updating hbase:meta row=0df583d2bd60c0fb93df75ab79af75ab, regionState=CLOSED 2024-12-15T01:37:18,268 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=214, resume processing ppid=212 2024-12-15T01:37:18,268 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=213, resume processing ppid=211 2024-12-15T01:37:18,268 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=213, ppid=211, state=SUCCESS; CloseRegionProcedure 0df583d2bd60c0fb93df75ab79af75ab, server=66d575e28c61,39381,1734226422872 in 163 msec 2024-12-15T01:37:18,269 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=214, ppid=212, state=SUCCESS; CloseRegionProcedure 1d4794c035c3c87db54e27f1a3f30bba, server=66d575e28c61,42663,1734226422715 in 161 
msec 2024-12-15T01:37:18,269 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=212, ppid=210, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=1d4794c035c3c87db54e27f1a3f30bba, UNASSIGN in 168 msec 2024-12-15T01:37:18,269 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=211, resume processing ppid=210 2024-12-15T01:37:18,269 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=211, ppid=210, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=0df583d2bd60c0fb93df75ab79af75ab, UNASSIGN in 168 msec 2024-12-15T01:37:18,271 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=210, resume processing ppid=209 2024-12-15T01:37:18,271 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=210, ppid=209, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 171 msec 2024-12-15T01:37:18,271 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734226638271"}]},"ts":"1734226638271"} 2024-12-15T01:37:18,272 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=DISABLED in hbase:meta 2024-12-15T01:37:18,286 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithSkipTmp to state=DISABLED 2024-12-15T01:37:18,287 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=209, state=SUCCESS; DisableTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 344 msec 2024-12-15T01:37:18,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=209 2024-12-15T01:37:18,546 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp, procId: 209 completed 2024-12-15T01:37:18,547 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithSkipTmp 2024-12-15T01:37:18,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] procedure2.ProcedureExecutor(1098): Stored pid=215, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-15T01:37:18,548 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=215, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-15T01:37:18,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportFileSystemStateWithSkipTmp 2024-12-15T01:37:18,549 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=215, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-15T01:37:18,549 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35281 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithSkipTmp 
2024-12-15T01:37:18,551 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithSkipTmp/0df583d2bd60c0fb93df75ab79af75ab 2024-12-15T01:37:18,551 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithSkipTmp/1d4794c035c3c87db54e27f1a3f30bba 2024-12-15T01:37:18,553 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithSkipTmp/1d4794c035c3c87db54e27f1a3f30bba/cf, FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithSkipTmp/1d4794c035c3c87db54e27f1a3f30bba/recovered.edits] 2024-12-15T01:37:18,553 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithSkipTmp/0df583d2bd60c0fb93df75ab79af75ab/cf, FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithSkipTmp/0df583d2bd60c0fb93df75ab79af75ab/recovered.edits] 2024-12-15T01:37:18,557 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithSkipTmp/1d4794c035c3c87db54e27f1a3f30bba/cf/86b9839d345b4a8399eddce803861a93 to hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/1d4794c035c3c87db54e27f1a3f30bba/cf/86b9839d345b4a8399eddce803861a93 2024-12-15T01:37:18,560 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithSkipTmp/1d4794c035c3c87db54e27f1a3f30bba/recovered.edits/9.seqid to hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/1d4794c035c3c87db54e27f1a3f30bba/recovered.edits/9.seqid 2024-12-15T01:37:18,560 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithSkipTmp/1d4794c035c3c87db54e27f1a3f30bba 2024-12-15T01:37:18,567 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithSkipTmp/0df583d2bd60c0fb93df75ab79af75ab/cf/b015a1c22bb241a3a77bec0221d47683 to hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/0df583d2bd60c0fb93df75ab79af75ab/cf/b015a1c22bb241a3a77bec0221d47683 2024-12-15T01:37:18,567 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, 
state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-15T01:37:18,567 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39381-0x1002752410c0003, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-15T01:37:18,567 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42663-0x1002752410c0001, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-15T01:37:18,567 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35281-0x1002752410c0002, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-15T01:37:18,569 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-15T01:37:18,569 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-15T01:37:18,569 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-15T01:37:18,569 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-15T01:37:18,572 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithSkipTmp/0df583d2bd60c0fb93df75ab79af75ab/recovered.edits/9.seqid to hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/0df583d2bd60c0fb93df75ab79af75ab/recovered.edits/9.seqid 2024-12-15T01:37:18,573 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testtb-testExportFileSystemStateWithSkipTmp/0df583d2bd60c0fb93df75ab79af75ab 2024-12-15T01:37:18,573 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithSkipTmp regions 2024-12-15T01:37:18,575 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=215, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-15T01:37:18,577 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithSkipTmp from hbase:meta 2024-12-15T01:37:18,579 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42663-0x1002752410c0001, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-15T01:37:18,579 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42663-0x1002752410c0001, quorum=127.0.0.1:64248, baseZNode=/hbase Received 
ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:37:18,579 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39381-0x1002752410c0003, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-15T01:37:18,579 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39381-0x1002752410c0003, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:37:18,579 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-15T01:37:18,579 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:37:18,579 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35281-0x1002752410c0002, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-15T01:37:18,579 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35281-0x1002752410c0002, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-15T01:37:18,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=215 2024-12-15T01:37:18,581 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportFileSystemStateWithSkipTmp' descriptor. 
2024-12-15T01:37:18,581 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-15T01:37:18,581 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-15T01:37:18,581 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-15T01:37:18,582 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-15T01:37:18,582 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=215, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-15T01:37:18,582 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportFileSystemStateWithSkipTmp' from region states. 2024-12-15T01:37:18,582 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp,,1734226621542.0df583d2bd60c0fb93df75ab79af75ab.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734226638582"}]},"ts":"9223372036854775807"} 2024-12-15T01:37:18,582 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp,1,1734226621542.1d4794c035c3c87db54e27f1a3f30bba.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734226638582"}]},"ts":"9223372036854775807"} 2024-12-15T01:37:18,584 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-15T01:37:18,584 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 0df583d2bd60c0fb93df75ab79af75ab, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1734226621542.0df583d2bd60c0fb93df75ab79af75ab.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 1d4794c035c3c87db54e27f1a3f30bba, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1734226621542.1d4794c035c3c87db54e27f1a3f30bba.', STARTKEY => '1', ENDKEY => ''}] 2024-12-15T01:37:18,584 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportFileSystemStateWithSkipTmp' as deleted. 
2024-12-15T01:37:18,584 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734226638584"}]},"ts":"9223372036854775807"} 2024-12-15T01:37:18,586 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportFileSystemStateWithSkipTmp state from META 2024-12-15T01:37:18,595 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=215, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-15T01:37:18,596 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=215, state=SUCCESS; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 48 msec 2024-12-15T01:37:18,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=215 2024-12-15T01:37:18,682 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp, procId: 215 completed 2024-12-15T01:37:18,687 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithSkipTmp" 2024-12-15T01:37:18,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-15T01:37:18,690 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithSkipTmp" 2024-12-15T01:37:18,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-15T01:37:18,708 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithSkipTmp Thread=814 (was 817), OpenFileDescriptor=809 (was 819), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=389 (was 370) - SystemLoadAverage LEAK? -, ProcessCount=20 (was 17) - ProcessCount LEAK? -, AvailableMemoryMB=2241 (was 2870) 2024-12-15T01:37:18,709 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=814 is superior to 500 2024-12-15T01:37:18,709 INFO [Time-limited test {}] hbase.HBaseTestingUtility(2861): Stopping mini mapreduce cluster... 
2024-12-15T01:37:18,714 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@66a81bd4{node,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-15T01:37:18,716 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@72ae2683{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-15T01:37:18,717 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-15T01:37:18,717 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2876a3a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-15T01:37:18,717 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1da2c307{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/hadoop.log.dir/,STOPPED} 2024-12-15T01:37:21,077 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 6e4491bba5f2e14d8ad41ac86f60d6f5, had cached 0 bytes from a total of 8120 2024-12-15T01:37:21,079 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region eded217274ea249d8fe0b60f2e3037a1, had cached 0 bytes from a total of 5490 2024-12-15T01:37:22,293 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp 2024-12-15T01:37:22,683 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734226430009_0010_000001 (auth:SIMPLE) from 127.0.0.1:49244 2024-12-15T01:37:22,693 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/MiniMRCluster_1118476571/yarn-659708074/MiniMRCluster_1118476571-localDir-nm-0_2/usercache/jenkins/appcache/application_1734226430009_0010/container_1734226430009_0010_01_000001/launch_container.sh] 2024-12-15T01:37:22,693 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/MiniMRCluster_1118476571/yarn-659708074/MiniMRCluster_1118476571-localDir-nm-0_2/usercache/jenkins/appcache/application_1734226430009_0010/container_1734226430009_0010_01_000001/container_tokens] 2024-12-15T01:37:22,693 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/MiniMRCluster_1118476571/yarn-659708074/MiniMRCluster_1118476571-localDir-nm-0_2/usercache/jenkins/appcache/application_1734226430009_0010/container_1734226430009_0010_01_000001/sysfs] 
2024-12-15T01:37:23,783 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-15T01:37:35,735 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@595d61ca{node,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-15T01:37:35,735 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7b09f030{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-15T01:37:35,735 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-15T01:37:35,735 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@30a31478{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-15T01:37:35,736 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@44fd238{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/hadoop.log.dir/,STOPPED} 2024-12-15T01:37:40,819 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-15T01:37:52,749 ERROR [Thread[Thread-417,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-12-15T01:37:52,750 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@22f35f84{cluster,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/cluster} 2024-12-15T01:37:52,751 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6d1a2166{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-15T01:37:52,751 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-15T01:37:52,751 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7867c0c8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-15T01:37:52,751 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@17958c7a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/hadoop.log.dir/,STOPPED} 2024-12-15T01:37:52,754 WARN [ApplicationMaster Launcher {}] amlauncher.ApplicationMasterLauncher$LauncherThread(122): org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher$LauncherThread interrupted. Returning. 
2024-12-15T01:37:52,758 ERROR [SchedulerEventDispatcher:Event Processor {}] event.EventDispatcher$EventProcessor(72): Returning, interrupted : java.lang.InterruptedException 2024-12-15T01:37:52,758 ERROR [ResourceManager Event Processor Monitor {}] resourcemanager.ResourceManager$SchedulerEventDispatcher$EventProcessorMonitor(1193): Returning, interrupted : java.lang.InterruptedException: sleep interrupted 2024-12-15T01:37:52,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741830_1006 (size=946894) 2024-12-15T01:37:52,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741830_1006 (size=946894) 2024-12-15T01:37:52,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741830_1006 (size=946894) 2024-12-15T01:37:52,762 ERROR [Thread[Thread-440,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-12-15T01:37:52,765 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4b0f89f9{jobhistory,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/jobhistory} 2024-12-15T01:37:52,766 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6bc50c27{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-15T01:37:52,766 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-15T01:37:52,766 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@653547ab{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-15T01:37:52,766 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1b757255{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/hadoop.log.dir/,STOPPED} 2024-12-15T01:37:52,767 ERROR [Thread[Thread-399,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-12-15T01:37:52,767 INFO [Time-limited test {}] hbase.HBaseTestingUtility(2864): Mini mapreduce cluster stopped 2024-12-15T01:37:52,767 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-12-15T01:37:52,767 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-15T01:37:52,767 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2522ae47 to 127.0.0.1:64248 2024-12-15T01:37:52,768 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T01:37:52,768 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-15T01:37:52,768 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1778584284, stopped=false 2024-12-15T01:37:52,768 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop 
coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-15T01:37:52,768 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-12-15T01:37:52,768 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=66d575e28c61,40789,1734226421776 2024-12-15T01:37:52,811 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-15T01:37:52,811 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42663-0x1002752410c0001, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-15T01:37:52,811 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-12-15T01:37:52,811 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35281-0x1002752410c0002, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-15T01:37:52,811 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39381-0x1002752410c0003, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-15T01:37:52,811 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42663-0x1002752410c0001, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-15T01:37:52,811 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39381-0x1002752410c0003, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-15T01:37:52,811 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35281-0x1002752410c0002, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-15T01:37:52,811 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-15T01:37:52,811 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T01:37:52,811 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42663-0x1002752410c0001, quorum=127.0.0.1:64248, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-15T01:37:52,811 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39381-0x1002752410c0003, quorum=127.0.0.1:64248, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-15T01:37:52,811 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35281-0x1002752410c0002, quorum=127.0.0.1:64248, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-15T01:37:52,812 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '66d575e28c61,42663,1734226422715' ***** 2024-12-15T01:37:52,812 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): 
master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-15T01:37:52,812 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-15T01:37:52,812 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-15T01:37:52,812 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '66d575e28c61,35281,1734226422799' ***** 2024-12-15T01:37:52,812 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-15T01:37:52,812 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-15T01:37:52,812 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '66d575e28c61,39381,1734226422872' ***** 2024-12-15T01:37:52,812 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-15T01:37:52,812 INFO [RS:0;66d575e28c61:42663 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-15T01:37:52,812 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-15T01:37:52,812 INFO [RS:1;66d575e28c61:35281 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-15T01:37:52,812 INFO [RS:0;66d575e28c61:42663 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-15T01:37:52,812 INFO [RS:2;66d575e28c61:39381 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-15T01:37:52,812 INFO [RS:0;66d575e28c61:42663 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-15T01:37:52,812 INFO [RS:1;66d575e28c61:35281 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-15T01:37:52,812 INFO [RS:2;66d575e28c61:39381 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-15T01:37:52,812 INFO [RS:1;66d575e28c61:35281 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-15T01:37:52,812 INFO [RS:2;66d575e28c61:39381 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-12-15T01:37:52,812 INFO [RS:1;66d575e28c61:35281 {}] regionserver.HRegionServer(3579): Received CLOSE for d0386827b59519398c69d2b9e07bf334 2024-12-15T01:37:52,812 INFO [RS:0;66d575e28c61:42663 {}] regionserver.HRegionServer(3579): Received CLOSE for eded217274ea249d8fe0b60f2e3037a1 2024-12-15T01:37:52,812 INFO [RS:2;66d575e28c61:39381 {}] regionserver.HRegionServer(3579): Received CLOSE for 6e4491bba5f2e14d8ad41ac86f60d6f5 2024-12-15T01:37:52,813 INFO [RS:0;66d575e28c61:42663 {}] regionserver.HRegionServer(1224): stopping server 66d575e28c61,42663,1734226422715 2024-12-15T01:37:52,813 INFO [RS:1;66d575e28c61:35281 {}] regionserver.HRegionServer(1224): stopping server 66d575e28c61,35281,1734226422799 2024-12-15T01:37:52,813 INFO [RS:2;66d575e28c61:39381 {}] regionserver.HRegionServer(3579): Received CLOSE for 874e078b36e6d955b975f9e096ec95f5 2024-12-15T01:37:52,813 DEBUG [RS:0;66d575e28c61:42663 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T01:37:52,813 INFO [RS:2;66d575e28c61:39381 {}] regionserver.HRegionServer(1224): stopping server 66d575e28c61,39381,1734226422872 2024-12-15T01:37:52,813 DEBUG [RS:1;66d575e28c61:35281 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T01:37:52,813 DEBUG [RS:2;66d575e28c61:39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T01:37:52,813 INFO [RS:2;66d575e28c61:39381 {}] regionserver.HRegionServer(1599): Waiting on 2 regions to close 2024-12-15T01:37:52,813 DEBUG [RS:2;66d575e28c61:39381 {}] regionserver.HRegionServer(1603): Online Regions={6e4491bba5f2e14d8ad41ac86f60d6f5=testExportExpiredSnapshot,1,1734226550667.6e4491bba5f2e14d8ad41ac86f60d6f5., 874e078b36e6d955b975f9e096ec95f5=hbase:namespace,,1734226425464.874e078b36e6d955b975f9e096ec95f5.} 2024-12-15T01:37:52,813 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-15T01:37:52,813 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-15T01:37:52,813 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-15T01:37:52,813 INFO [RS:1;66d575e28c61:35281 {}] regionserver.HRegionServer(1599): Waiting on 1 regions to close 2024-12-15T01:37:52,813 DEBUG [RS:1;66d575e28c61:35281 {}] regionserver.HRegionServer(1603): Online Regions={d0386827b59519398c69d2b9e07bf334=hbase:acl,,1734226426324.d0386827b59519398c69d2b9e07bf334.} 2024-12-15T01:37:52,813 INFO [RS:0;66d575e28c61:42663 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-15T01:37:52,813 INFO [RS:0;66d575e28c61:42663 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-15T01:37:52,813 INFO [RS:0;66d575e28c61:42663 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-15T01:37:52,813 INFO [RS:0;66d575e28c61:42663 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-15T01:37:52,813 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 6e4491bba5f2e14d8ad41ac86f60d6f5, disabling compactions & flushes 2024-12-15T01:37:52,813 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region testExportExpiredSnapshot,1,1734226550667.6e4491bba5f2e14d8ad41ac86f60d6f5. 
2024-12-15T01:37:52,813 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportExpiredSnapshot,1,1734226550667.6e4491bba5f2e14d8ad41ac86f60d6f5. 2024-12-15T01:37:52,813 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on testExportExpiredSnapshot,1,1734226550667.6e4491bba5f2e14d8ad41ac86f60d6f5. after waiting 0 ms 2024-12-15T01:37:52,813 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region testExportExpiredSnapshot,1,1734226550667.6e4491bba5f2e14d8ad41ac86f60d6f5. 2024-12-15T01:37:52,814 INFO [RS:0;66d575e28c61:42663 {}] regionserver.HRegionServer(1599): Waiting on 2 regions to close 2024-12-15T01:37:52,814 DEBUG [RS:0;66d575e28c61:42663 {}] regionserver.HRegionServer(1603): Online Regions={eded217274ea249d8fe0b60f2e3037a1=testExportExpiredSnapshot,,1734226550667.eded217274ea249d8fe0b60f2e3037a1., 1588230740=hbase:meta,,1.1588230740} 2024-12-15T01:37:52,814 DEBUG [RS_CLOSE_META-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-15T01:37:52,814 INFO [RS_CLOSE_META-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-15T01:37:52,814 DEBUG [RS_CLOSE_META-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-15T01:37:52,814 DEBUG [RS_CLOSE_META-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-15T01:37:52,814 DEBUG [RS_CLOSE_META-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-15T01:37:52,814 INFO [RS_CLOSE_META-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=68.66 KB heapSize=109 KB 2024-12-15T01:37:52,816 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing eded217274ea249d8fe0b60f2e3037a1, disabling compactions & flushes 2024-12-15T01:37:52,816 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing d0386827b59519398c69d2b9e07bf334, disabling compactions & flushes 2024-12-15T01:37:52,817 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region testExportExpiredSnapshot,,1734226550667.eded217274ea249d8fe0b60f2e3037a1. 2024-12-15T01:37:52,817 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:acl,,1734226426324.d0386827b59519398c69d2b9e07bf334. 2024-12-15T01:37:52,817 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportExpiredSnapshot,,1734226550667.eded217274ea249d8fe0b60f2e3037a1. 
2024-12-15T01:37:52,817 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:acl,,1734226426324.d0386827b59519398c69d2b9e07bf334. 2024-12-15T01:37:52,817 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on testExportExpiredSnapshot,,1734226550667.eded217274ea249d8fe0b60f2e3037a1. after waiting 0 ms 2024-12-15T01:37:52,817 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region testExportExpiredSnapshot,,1734226550667.eded217274ea249d8fe0b60f2e3037a1. 2024-12-15T01:37:52,817 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:acl,,1734226426324.d0386827b59519398c69d2b9e07bf334. after waiting 0 ms 2024-12-15T01:37:52,817 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:acl,,1734226426324.d0386827b59519398c69d2b9e07bf334. 2024-12-15T01:37:52,817 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing d0386827b59519398c69d2b9e07bf334 1/1 column families, dataSize=1.38 KB heapSize=3.33 KB 2024-12-15T01:37:52,817 DEBUG [RS:2;66d575e28c61:39381 {}] regionserver.HRegionServer(1629): Waiting on 6e4491bba5f2e14d8ad41ac86f60d6f5, 874e078b36e6d955b975f9e096ec95f5 2024-12-15T01:37:52,817 DEBUG [RS:1;66d575e28c61:35281 {}] regionserver.HRegionServer(1629): Waiting on d0386827b59519398c69d2b9e07bf334 2024-12-15T01:37:52,817 DEBUG [RS:0;66d575e28c61:42663 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, eded217274ea249d8fe0b60f2e3037a1 2024-12-15T01:37:52,817 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testExportExpiredSnapshot/6e4491bba5f2e14d8ad41ac86f60d6f5/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-15T01:37:52,818 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-15T01:37:52,818 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed testExportExpiredSnapshot,1,1734226550667.6e4491bba5f2e14d8ad41ac86f60d6f5. 2024-12-15T01:37:52,818 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 6e4491bba5f2e14d8ad41ac86f60d6f5: 2024-12-15T01:37:52,818 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testExportExpiredSnapshot,1,1734226550667.6e4491bba5f2e14d8ad41ac86f60d6f5. 2024-12-15T01:37:52,819 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 874e078b36e6d955b975f9e096ec95f5, disabling compactions & flushes 2024-12-15T01:37:52,819 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1734226425464.874e078b36e6d955b975f9e096ec95f5. 
2024-12-15T01:37:52,819 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1734226425464.874e078b36e6d955b975f9e096ec95f5. 2024-12-15T01:37:52,819 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1734226425464.874e078b36e6d955b975f9e096ec95f5. after waiting 0 ms 2024-12-15T01:37:52,819 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1734226425464.874e078b36e6d955b975f9e096ec95f5. 2024-12-15T01:37:52,819 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 874e078b36e6d955b975f9e096ec95f5 1/1 column families, dataSize=78 B heapSize=488 B 2024-12-15T01:37:52,820 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/default/testExportExpiredSnapshot/eded217274ea249d8fe0b60f2e3037a1/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-15T01:37:52,821 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-15T01:37:52,821 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed testExportExpiredSnapshot,,1734226550667.eded217274ea249d8fe0b60f2e3037a1. 2024-12-15T01:37:52,821 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for eded217274ea249d8fe0b60f2e3037a1: 2024-12-15T01:37:52,821 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testExportExpiredSnapshot,,1734226550667.eded217274ea249d8fe0b60f2e3037a1. 
2024-12-15T01:37:52,830 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/hbase/namespace/874e078b36e6d955b975f9e096ec95f5/.tmp/info/3875e3c51a324ae5b7cb0884ffd55656 is 45, key is default/info:d/1734226426143/Put/seqid=0 2024-12-15T01:37:52,830 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/hbase/acl/d0386827b59519398c69d2b9e07bf334/.tmp/l/78d674164275436598baaddad4a5adaf is 74, key is testtb-testExportFileSystemStateWithMergeRegion-1/l:/1734226548661/DeleteFamily/seqid=0 2024-12-15T01:37:52,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742380_1556 (size=5695) 2024-12-15T01:37:52,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742379_1555 (size=5037) 2024-12-15T01:37:52,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742379_1555 (size=5037) 2024-12-15T01:37:52,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742380_1556 (size=5695) 2024-12-15T01:37:52,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742380_1556 (size=5695) 2024-12-15T01:37:52,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742379_1555 (size=5037) 2024-12-15T01:37:52,836 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/hbase/namespace/874e078b36e6d955b975f9e096ec95f5/.tmp/info/3875e3c51a324ae5b7cb0884ffd55656 2024-12-15T01:37:52,836 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.38 KB at sequenceid=27 (bloomFilter=false), to=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/hbase/acl/d0386827b59519398c69d2b9e07bf334/.tmp/l/78d674164275436598baaddad4a5adaf 2024-12-15T01:37:52,839 DEBUG [RS_CLOSE_META-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/hbase/meta/1588230740/.tmp/info/1743fcbc615e47a9878a4137bdb66ff6 is 173, key is testExportExpiredSnapshot,1,1734226550667.6e4491bba5f2e14d8ad41ac86f60d6f5./info:regioninfo/1734226551090/Put/seqid=0 2024-12-15T01:37:52,841 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 78d674164275436598baaddad4a5adaf 2024-12-15T01:37:52,841 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/hbase/acl/d0386827b59519398c69d2b9e07bf334/.tmp/l/78d674164275436598baaddad4a5adaf as hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/hbase/acl/d0386827b59519398c69d2b9e07bf334/l/78d674164275436598baaddad4a5adaf 2024-12-15T01:37:52,844 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/hbase/namespace/874e078b36e6d955b975f9e096ec95f5/.tmp/info/3875e3c51a324ae5b7cb0884ffd55656 as hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/hbase/namespace/874e078b36e6d955b975f9e096ec95f5/info/3875e3c51a324ae5b7cb0884ffd55656 2024-12-15T01:37:52,846 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 78d674164275436598baaddad4a5adaf 2024-12-15T01:37:52,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742381_1557 (size=15630) 2024-12-15T01:37:52,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742381_1557 (size=15630) 2024-12-15T01:37:52,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742381_1557 (size=15630) 2024-12-15T01:37:52,846 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/hbase/acl/d0386827b59519398c69d2b9e07bf334/l/78d674164275436598baaddad4a5adaf, entries=12, sequenceid=27, filesize=5.6 K 2024-12-15T01:37:52,847 INFO [RS_CLOSE_META-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.26 KB at sequenceid=202 (bloomFilter=true), to=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/hbase/meta/1588230740/.tmp/info/1743fcbc615e47a9878a4137bdb66ff6 2024-12-15T01:37:52,847 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~1.38 KB/1412, heapSize ~3.31 KB/3392, currentSize=0 B/0 for d0386827b59519398c69d2b9e07bf334 in 30ms, sequenceid=27, compaction requested=false 2024-12-15T01:37:52,849 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/hbase/namespace/874e078b36e6d955b975f9e096ec95f5/info/3875e3c51a324ae5b7cb0884ffd55656, entries=2, sequenceid=6, filesize=4.9 K 2024-12-15T01:37:52,849 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for 874e078b36e6d955b975f9e096ec95f5 in 30ms, sequenceid=6, compaction requested=false 2024-12-15T01:37:52,850 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote 
file=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/hbase/acl/d0386827b59519398c69d2b9e07bf334/recovered.edits/30.seqid, newMaxSeqId=30, maxSeqId=1 2024-12-15T01:37:52,850 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-15T01:37:52,851 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:acl,,1734226426324.d0386827b59519398c69d2b9e07bf334. 2024-12-15T01:37:52,851 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for d0386827b59519398c69d2b9e07bf334: 2024-12-15T01:37:52,851 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:acl,,1734226426324.d0386827b59519398c69d2b9e07bf334. 2024-12-15T01:37:52,852 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/hbase/namespace/874e078b36e6d955b975f9e096ec95f5/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-15T01:37:52,852 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-15T01:37:52,852 INFO [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1734226425464.874e078b36e6d955b975f9e096ec95f5. 2024-12-15T01:37:52,852 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 874e078b36e6d955b975f9e096ec95f5: 2024-12-15T01:37:52,853 DEBUG [RS_CLOSE_REGION-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1734226425464.874e078b36e6d955b975f9e096ec95f5. 
2024-12-15T01:37:52,868 DEBUG [RS_CLOSE_META-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/hbase/meta/1588230740/.tmp/rep_barrier/8935f677158a464ab70ed6d88628cdfc is 133, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1734226532421.45053ccce96bc6a9f3f1b388ece26995./rep_barrier:/1734226548683/DeleteFamily/seqid=0 2024-12-15T01:37:52,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742382_1558 (size=8007) 2024-12-15T01:37:52,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742382_1558 (size=8007) 2024-12-15T01:37:52,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742382_1558 (size=8007) 2024-12-15T01:37:52,873 INFO [RS_CLOSE_META-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.34 KB at sequenceid=202 (bloomFilter=true), to=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/hbase/meta/1588230740/.tmp/rep_barrier/8935f677158a464ab70ed6d88628cdfc 2024-12-15T01:37:52,888 DEBUG [RS_CLOSE_META-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/hbase/meta/1588230740/.tmp/table/23346966c5144f40923295d1f7c0eb3c is 127, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1734226532421.45053ccce96bc6a9f3f1b388ece26995./table:/1734226548683/DeleteFamily/seqid=0 2024-12-15T01:37:52,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073742383_1559 (size=8861) 2024-12-15T01:37:52,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073742383_1559 (size=8861) 2024-12-15T01:37:52,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073742383_1559 (size=8861) 2024-12-15T01:37:52,893 INFO [RS_CLOSE_META-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.06 KB at sequenceid=202 (bloomFilter=true), to=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/hbase/meta/1588230740/.tmp/table/23346966c5144f40923295d1f7c0eb3c 2024-12-15T01:37:52,897 DEBUG [RS_CLOSE_META-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/hbase/meta/1588230740/.tmp/info/1743fcbc615e47a9878a4137bdb66ff6 as hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/hbase/meta/1588230740/info/1743fcbc615e47a9878a4137bdb66ff6 2024-12-15T01:37:52,901 INFO [RS_CLOSE_META-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/hbase/meta/1588230740/info/1743fcbc615e47a9878a4137bdb66ff6, entries=84, sequenceid=202, 
filesize=15.3 K 2024-12-15T01:37:52,902 DEBUG [RS_CLOSE_META-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/hbase/meta/1588230740/.tmp/rep_barrier/8935f677158a464ab70ed6d88628cdfc as hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/hbase/meta/1588230740/rep_barrier/8935f677158a464ab70ed6d88628cdfc 2024-12-15T01:37:52,904 INFO [regionserver/66d575e28c61:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-15T01:37:52,906 INFO [RS_CLOSE_META-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/hbase/meta/1588230740/rep_barrier/8935f677158a464ab70ed6d88628cdfc, entries=21, sequenceid=202, filesize=7.8 K 2024-12-15T01:37:52,906 DEBUG [RS_CLOSE_META-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/hbase/meta/1588230740/.tmp/table/23346966c5144f40923295d1f7c0eb3c as hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/hbase/meta/1588230740/table/23346966c5144f40923295d1f7c0eb3c 2024-12-15T01:37:52,909 INFO [regionserver/66d575e28c61:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-15T01:37:52,910 INFO [RS_CLOSE_META-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/hbase/meta/1588230740/table/23346966c5144f40923295d1f7c0eb3c, entries=38, sequenceid=202, filesize=8.7 K 2024-12-15T01:37:52,911 INFO [RS_CLOSE_META-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~68.66 KB/70312, heapSize ~108.95 KB/111568, currentSize=0 B/0 for 1588230740 in 97ms, sequenceid=202, compaction requested=false 2024-12-15T01:37:52,911 INFO [regionserver/66d575e28c61:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-15T01:37:52,914 DEBUG [RS_CLOSE_META-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/data/hbase/meta/1588230740/recovered.edits/205.seqid, newMaxSeqId=205, maxSeqId=1 2024-12-15T01:37:52,915 DEBUG [RS_CLOSE_META-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-15T01:37:52,915 DEBUG [RS_CLOSE_META-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-15T01:37:52,915 INFO [RS_CLOSE_META-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-15T01:37:52,915 DEBUG [RS_CLOSE_META-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-15T01:37:52,915 DEBUG [RS_CLOSE_META-regionserver/66d575e28c61:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 
2024-12-15T01:37:53,017 INFO [RS:0;66d575e28c61:42663 {}] regionserver.HRegionServer(1250): stopping server 66d575e28c61,42663,1734226422715; all regions closed. 2024-12-15T01:37:53,017 INFO [RS:1;66d575e28c61:35281 {}] regionserver.HRegionServer(1250): stopping server 66d575e28c61,35281,1734226422799; all regions closed. 2024-12-15T01:37:53,017 INFO [RS:2;66d575e28c61:39381 {}] regionserver.HRegionServer(1250): stopping server 66d575e28c61,39381,1734226422872; all regions closed. 2024-12-15T01:37:53,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741834_1010 (size=14622) 2024-12-15T01:37:53,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741834_1010 (size=14622) 2024-12-15T01:37:53,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741833_1009 (size=14336) 2024-12-15T01:37:53,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741833_1009 (size=14336) 2024-12-15T01:37:53,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741834_1010 (size=14622) 2024-12-15T01:37:53,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741833_1009 (size=14336) 2024-12-15T01:37:53,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741836_1012 (size=80694) 2024-12-15T01:37:53,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741836_1012 (size=80694) 2024-12-15T01:37:53,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741836_1012 (size=80694) 2024-12-15T01:37:53,026 DEBUG [RS:0;66d575e28c61:42663 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/oldWALs 2024-12-15T01:37:53,026 INFO [RS:0;66d575e28c61:42663 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 66d575e28c61%2C42663%2C1734226422715.meta:.meta(num 1734226425234) 2024-12-15T01:37:53,026 DEBUG [RS:1;66d575e28c61:35281 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/oldWALs 2024-12-15T01:37:53,026 INFO [RS:1;66d575e28c61:35281 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 66d575e28c61%2C35281%2C1734226422799:(num 1734226424697) 2024-12-15T01:37:53,026 DEBUG [RS:1;66d575e28c61:35281 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T01:37:53,026 DEBUG [RS:2;66d575e28c61:39381 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/oldWALs 2024-12-15T01:37:53,026 INFO [RS:1;66d575e28c61:35281 {}] regionserver.LeaseManager(133): Closed leases 2024-12-15T01:37:53,026 INFO [RS:2;66d575e28c61:39381 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 66d575e28c61%2C39381%2C1734226422872:(num 1734226424697) 2024-12-15T01:37:53,027 DEBUG [RS:2;66d575e28c61:39381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T01:37:53,027 INFO [RS:2;66d575e28c61:39381 {}] regionserver.LeaseManager(133): Closed leases 2024-12-15T01:37:53,027 
INFO [RS:2;66d575e28c61:39381 {}] hbase.ChoreService(370): Chore service for: regionserver/66d575e28c61:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS] on shutdown 2024-12-15T01:37:53,027 INFO [RS:1;66d575e28c61:35281 {}] hbase.ChoreService(370): Chore service for: regionserver/66d575e28c61:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS] on shutdown 2024-12-15T01:37:53,027 INFO [RS:2;66d575e28c61:39381 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-15T01:37:53,027 INFO [RS:1;66d575e28c61:35281 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-15T01:37:53,027 INFO [RS:2;66d575e28c61:39381 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-15T01:37:53,027 INFO [RS:2;66d575e28c61:39381 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-15T01:37:53,027 INFO [RS:1;66d575e28c61:35281 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-15T01:37:53,027 INFO [RS:1;66d575e28c61:35281 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-15T01:37:53,027 INFO [regionserver/66d575e28c61:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-12-15T01:37:53,027 INFO [regionserver/66d575e28c61:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-12-15T01:37:53,027 INFO [RS:2;66d575e28c61:39381 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:39381 2024-12-15T01:37:53,028 INFO [RS:1;66d575e28c61:35281 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:35281 2024-12-15T01:37:53,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46345 is added to blk_1073741835_1011 (size=9316) 2024-12-15T01:37:53,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38403 is added to blk_1073741835_1011 (size=9316) 2024-12-15T01:37:53,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38745 is added to blk_1073741835_1011 (size=9316) 2024-12-15T01:37:53,029 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(743): complete file /user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/WALs/66d575e28c61,42663,1734226422715/66d575e28c61%2C42663%2C1734226422715.1734226424700 not finished, retry = 0 2024-12-15T01:37:53,041 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-15T01:37:53,041 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39381-0x1002752410c0003, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/66d575e28c61,39381,1734226422872 2024-12-15T01:37:53,049 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35281-0x1002752410c0002, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/66d575e28c61,35281,1734226422799 2024-12-15T01:37:53,058 INFO 
[RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [66d575e28c61,35281,1734226422799] 2024-12-15T01:37:53,058 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 66d575e28c61,35281,1734226422799; numProcessing=1 2024-12-15T01:37:53,133 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/66d575e28c61,35281,1734226422799 already deleted, retry=false 2024-12-15T01:37:53,133 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 66d575e28c61,35281,1734226422799 expired; onlineServers=2 2024-12-15T01:37:53,133 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [66d575e28c61,39381,1734226422872] 2024-12-15T01:37:53,133 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 66d575e28c61,39381,1734226422872; numProcessing=2 2024-12-15T01:37:53,135 DEBUG [RS:0;66d575e28c61:42663 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/oldWALs 2024-12-15T01:37:53,136 INFO [RS:0;66d575e28c61:42663 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 66d575e28c61%2C42663%2C1734226422715:(num 1734226424700) 2024-12-15T01:37:53,136 DEBUG [RS:0;66d575e28c61:42663 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T01:37:53,136 INFO [RS:0;66d575e28c61:42663 {}] regionserver.LeaseManager(133): Closed leases 2024-12-15T01:37:53,136 INFO [RS:0;66d575e28c61:42663 {}] hbase.ChoreService(370): Chore service for: regionserver/66d575e28c61:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS] on shutdown 2024-12-15T01:37:53,137 INFO [regionserver/66d575e28c61:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-12-15T01:37:53,137 INFO [RS:0;66d575e28c61:42663 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:42663 2024-12-15T01:37:53,144 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/66d575e28c61,39381,1734226422872 already deleted, retry=false 2024-12-15T01:37:53,144 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 66d575e28c61,39381,1734226422872 expired; onlineServers=1 2024-12-15T01:37:53,152 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-15T01:37:53,152 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42663-0x1002752410c0001, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/66d575e28c61,42663,1734226422715 2024-12-15T01:37:53,158 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39381-0x1002752410c0003, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-15T01:37:53,158 INFO [RS:2;66d575e28c61:39381 {}] regionserver.HRegionServer(1307): Exiting; stopping=66d575e28c61,39381,1734226422872; zookeeper connection closed. 
2024-12-15T01:37:53,158 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39381-0x1002752410c0003, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-15T01:37:53,158 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6c8132a6 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6c8132a6 2024-12-15T01:37:53,161 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [66d575e28c61,42663,1734226422715] 2024-12-15T01:37:53,161 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 66d575e28c61,42663,1734226422715; numProcessing=3 2024-12-15T01:37:53,166 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35281-0x1002752410c0002, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-15T01:37:53,166 INFO [RS:1;66d575e28c61:35281 {}] regionserver.HRegionServer(1307): Exiting; stopping=66d575e28c61,35281,1734226422799; zookeeper connection closed. 2024-12-15T01:37:53,166 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35281-0x1002752410c0002, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-15T01:37:53,166 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2a42d0a3 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2a42d0a3 2024-12-15T01:37:53,169 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/66d575e28c61,42663,1734226422715 already deleted, retry=false 2024-12-15T01:37:53,169 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 66d575e28c61,42663,1734226422715 expired; onlineServers=0 2024-12-15T01:37:53,169 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server '66d575e28c61,40789,1734226421776' ***** 2024-12-15T01:37:53,169 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-15T01:37:53,170 DEBUG [M:0;66d575e28c61:40789 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5bfd6f2c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=66d575e28c61/172.17.0.2:0 2024-12-15T01:37:53,170 INFO [M:0;66d575e28c61:40789 {}] regionserver.HRegionServer(1224): stopping server 66d575e28c61,40789,1734226421776 2024-12-15T01:37:53,170 INFO [M:0;66d575e28c61:40789 {}] regionserver.HRegionServer(1250): stopping server 66d575e28c61,40789,1734226421776; all regions closed. 2024-12-15T01:37:53,170 DEBUG [M:0;66d575e28c61:40789 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T01:37:53,170 DEBUG [M:0;66d575e28c61:40789 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-15T01:37:53,170 DEBUG [M:0;66d575e28c61:40789 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-15T01:37:53,170 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-15T01:37:53,171 DEBUG [master/66d575e28c61:0:becomeActiveMaster-HFileCleaner.large.0-1734226424258 {}] cleaner.HFileCleaner(306): Exit Thread[master/66d575e28c61:0:becomeActiveMaster-HFileCleaner.large.0-1734226424258,5,FailOnTimeoutGroup] 2024-12-15T01:37:53,171 DEBUG [master/66d575e28c61:0:becomeActiveMaster-HFileCleaner.small.0-1734226424259 {}] cleaner.HFileCleaner(306): Exit Thread[master/66d575e28c61:0:becomeActiveMaster-HFileCleaner.small.0-1734226424259,5,FailOnTimeoutGroup] 2024-12-15T01:37:53,171 INFO [M:0;66d575e28c61:40789 {}] hbase.ChoreService(370): Chore service for: master/66d575e28c61:0 had [] on shutdown 2024-12-15T01:37:53,171 DEBUG [M:0;66d575e28c61:40789 {}] master.HMaster(1733): Stopping service threads 2024-12-15T01:37:53,171 INFO [M:0;66d575e28c61:40789 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-15T01:37:53,172 INFO [M:0;66d575e28c61:40789 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-15T01:37:53,172 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-15T01:37:53,177 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-15T01:37:53,177 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-15T01:37:53,177 DEBUG [M:0;66d575e28c61:40789 {}] zookeeper.ZKUtil(347): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-15T01:37:53,177 WARN [M:0;66d575e28c61:40789 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-15T01:37:53,177 INFO [M:0;66d575e28c61:40789 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-12-15T01:37:53,178 INFO [M:0;66d575e28c61:40789 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-15T01:37:53,178 DEBUG [M:0;66d575e28c61:40789 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-15T01:37:53,178 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-15T01:37:53,190 INFO [M:0;66d575e28c61:40789 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-15T01:37:53,190 DEBUG [M:0;66d575e28c61:40789 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-15T01:37:53,190 DEBUG [M:0;66d575e28c61:40789 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-15T01:37:53,190 DEBUG [M:0;66d575e28c61:40789 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-15T01:37:53,190 INFO [M:0;66d575e28c61:40789 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=805.44 KB heapSize=966.84 KB
2024-12-15T01:37:53,190 ERROR [AsyncFSWAL-0-hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/MasterData-prefix:66d575e28c61,40789,1734226421776 {}] server.NIOServerCnxnFactory(85): Thread Thread[AsyncFSWAL-0-hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/MasterData-prefix:66d575e28c61,40789,1734226421776,5,FailOnTimeoutGroup] died
java.lang.NullPointerException: Cannot invoke "org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf.readableBytes()" because "this.buf" is null
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.buffered(FanOutOneBlockAsyncDFSOutput.java:419) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.append(AsyncProtobufLogWriter.java:132) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doAppend(AsyncFSWAL.java:830) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doAppend(AsyncFSWAL.java:128) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.appendEntry(AbstractFSWAL.java:1148) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.appendAndSync(AsyncFSWAL.java:500) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.consume(AsyncFSWAL.java:603) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-15T01:37:53,261 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42663-0x1002752410c0001, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-15T01:37:53,261 INFO [RS:0;66d575e28c61:42663 {}] regionserver.HRegionServer(1307): Exiting; stopping=66d575e28c61,42663,1734226422715; zookeeper connection closed.
2024-12-15T01:37:53,261 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42663-0x1002752410c0001, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-15T01:37:53,262 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@69193175 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@69193175 2024-12-15T01:37:53,262 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-15T01:37:58,341 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-15T01:38:02,293 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-15T01:38:02,293 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-15T01:38:02,294 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-12-15T01:38:02,294 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_acl 2024-12-15T01:38:02,294 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-15T01:38:02,294 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-12-15T01:38:02,294 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-15T01:38:02,294 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-15T01:38:02,294 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot 2024-12-15T01:38:07,798 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-15T01:38:10,819 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-15T01:38:40,820 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-15T01:38:42,991 DEBUG [master/66d575e28c61:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=7, created chunk count=10, reused chunk count=22, reuseRatio=68.75%
2024-12-15T01:38:42,992 DEBUG [master/66d575e28c61:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0
2024-12-15T01:38:50,785 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;66d575e28c61:40789
225 active threads
Thread 1 (main):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 3
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444)
    java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203)
    app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167)
    app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128)
    app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39)
    app//org.junit.rules.RunRules.evaluate(RunRules.java:20)
    app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
    app//org.junit.runners.ParentRunner.run(ParentRunner.java:413)
    app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316)
    app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240)
    app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214)
    app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155)
    app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385)
    app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162)
    app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507)
    app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495)
Thread 2 (Reference Handler):
  State: RUNNABLE
  Blocked count: 12
  Waited count: 0
  Stack:
    java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method)
    java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253)
    java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215)
Thread 3 (Finalizer):
  State: WAITING
  Blocked count: 28
  Waited count: 12
  Waiting on java.lang.ref.ReferenceQueue$Lock@631b7d9f
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155)
    java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176)
    java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172)
Thread 4 (Signal Dispatcher):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
Thread 12 (Common-Cleaner):
  State: TIMED_WAITING
  Blocked count: 16
  Waited count: 16
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155)
    java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
    java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162)
Thread 13 (Notification Thread):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
Thread 14 (pool-1-thread-1):
  State: WAITING
  Blocked count: 0
  Waited count: 16
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5234a66a
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420)
    java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275)
    java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 15 (pool-1-thread-2):
  State: RUNNABLE
  Blocked count: 3
  Waited count: 17
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200)
    java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281)
    java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 16 (surefire-forkedjvm-stream-flusher):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 3181
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 18 (surefire-forkedjvm-command-thread):
  State: WAITING
  Blocked count: 0
  Waited count: 32
  Waiting on java.util.concurrent.CountDownLatch$Sync@17a3ae62
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230)
    java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178)
    app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127)
    java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
    java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284)
    java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343)
    app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169)
    app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50)
    app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430)
    app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419)
    app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116)
    app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77)
    app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60)
    app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 22 (Time-limited test):
  State: RUNNABLE
  Blocked count: 13486
  Waited count: 13946
  Stack:
    java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method)
    java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197)
    java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154)
    app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181)
    app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186)
    app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113)
    app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:394)
    app//org.apache.hadoop.hbase.MiniHBaseCluster.waitUntilShutDown(MiniHBaseCluster.java:921)
    app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniHBaseCluster(HBaseTestingUtility.java:1359)
    app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniCluster(HBaseTestingUtility.java:1341)
    app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121)
    java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568)
    app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
    app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
    app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
    app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46)
    app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner):
  State: WAITING
  Blocked count: 8
  Waited count: 9
  Waiting on java.lang.ref.ReferenceQueue$Lock@10fe4b5c
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155)
    java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176)
    app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 25 (SSL Certificates Store Monitor):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.TaskQueue@678eb8df
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.lang.Object.wait(Object.java:338)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@7299a68c):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 631
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 64
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161)
Thread 36 (pool-6-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 37 (qtp1021651416-37):
  State: RUNNABLE
  Blocked count: 9
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183)
    app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137)
    app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f4f40428988.run(Unknown Source)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 38 (qtp1021651416-38):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183)
    app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137)
    app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f4f40428988.run(Unknown Source)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 39 (qtp1021651416-39):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183)
    app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137)
    app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f4f40428988.run(Unknown Source)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 40 (qtp1021651416-40):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183)
    app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137)
    app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f4f40428988.run(Unknown Source)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 41 (qtp1021651416-41-acceptor-0@752a6971-ServerConnector@2847eba1{HTTP/1.1, (http/1.1)}{localhost:35059}):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.Net.accept(Native Method)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391)
    app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388)
    app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 42 (qtp1021651416-42):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 6
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 43 (qtp1021651416-43):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 6
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 44 (qtp1021651416-44):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 6
  Stack:
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-56b4614a-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 22 Waited count: 2907 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@41796701 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) 
app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 33329): State: TIMED_WAITING Blocked count: 1 Waited count: 33 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 63 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@5d1d1282): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 106 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@7f2d48e5): State: TIMED_WAITING Blocked count: 0 
Waited count: 63 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 108 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 31059 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 2 Waited count: 1235 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@45ab02b1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 33329): State: TIMED_WAITING Blocked count: 109 Waited count: 2123 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 33329): State: TIMED_WAITING Blocked count: 86 Waited count: 2125 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 33329): State: TIMED_WAITING Blocked count: 118 Waited count: 2114 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 33329): State: TIMED_WAITING Blocked count: 106 Waited count: 2108 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 33329): State: TIMED_WAITING Blocked count: 97 Waited count: 2109 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@ab4ee67): State: TIMED_WAITING Blocked count: 0 Waited count: 158 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@2e0c8d43): State: TIMED_WAITING Blocked count: 0 Waited count: 63 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@2ae35f71): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@63bf282f): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(1603592889)): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp1388150912-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f4f40428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp1388150912-88-acceptor-0@754170a3-ServerConnector@120fd64b{HTTP/1.1, (http/1.1)}{localhost:41125}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp1388150912-89): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp1388150912-90): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-7e10f8ad-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@95a30d6): State: TIMED_WAITING Blocked count: 0 Waited count: 628 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 42339): State: TIMED_WAITING Blocked count: 1 Waited count: 33 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 63 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 1 Waited count: 300 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@79c92cfb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-747981782-172.17.0.2-1734226417326 heartbeating to localhost/127.0.0.1:33329): State: TIMED_WAITING Blocked count: 1352 Waited count: 1339 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@1340d90b): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) 
java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 42339): State: TIMED_WAITING Blocked count: 0 Waited count: 318 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 42339): State: TIMED_WAITING Blocked count: 0 Waited count: 321 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 42339): State: TIMED_WAITING Blocked count: 0 Waited count: 316 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 42339): State: TIMED_WAITING Blocked count: 0 Waited count: 317 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 42339): State: TIMED_WAITING Blocked count: 0 Waited count: 314 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 118 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (qtp1956370377-119): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f4f40428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp1956370377-120-acceptor-0@7570cd01-ServerConnector@423c535d{HTTP/1.1, (http/1.1)}{localhost:36059}): State: RUNNABLE 
Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp1956370377-121): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp1956370377-122): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (Session-HouseKeeper-2cc38048-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (IPC Client (309020234) connection to localhost/127.0.0.1:33329 from jenkins): State: TIMED_WAITING Blocked count: 1183 Waited count: 1184 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 125 (IPC Parameter Sending Thread for localhost/127.0.0.1:33329): State: TIMED_WAITING 
Blocked count: 0 Waited count: 1876 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@f6c94d4): State: TIMED_WAITING Blocked count: 0 Waited count: 628 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 44765): State: TIMED_WAITING Blocked count: 1 Waited count: 33 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 63 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 1 Waited count: 258 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6be6fb02 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-747981782-172.17.0.2-1734226417326 heartbeating to localhost/127.0.0.1:33329): State: TIMED_WAITING Blocked count: 1327 Waited count: 1335 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@72b5e237): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): 
State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 44765): State: TIMED_WAITING Blocked count: 0 Waited count: 341 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 44765): State: TIMED_WAITING Blocked count: 0 Waited count: 315 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 44765): State: TIMED_WAITING Blocked count: 0 Waited count: 389 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 44765): State: TIMED_WAITING Blocked count: 0 Waited count: 344 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default 
port 44765): State: TIMED_WAITING Blocked count: 0 Waited count: 328 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp395497086-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f4f40428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp395497086-154-acceptor-0@4e85d132-ServerConnector@27188009{HTTP/1.1, (http/1.1)}{localhost:34737}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) 
app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp395497086-155): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp395497086-156): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-83efa6e-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@27b376d6): State: TIMED_WAITING Blocked count: 0 Waited count: 628 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 41527): State: TIMED_WAITING Blocked count: 1 Waited count: 33 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 63 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 0 Waited count: 261 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@581883eb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-747981782-172.17.0.2-1734226417326 heartbeating to localhost/127.0.0.1:33329): State: TIMED_WAITING Blocked count: 1225 Waited 
count: 1331 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@677edd20): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 41527): State: TIMED_WAITING Blocked count: 0 Waited count: 314 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 41527): State: TIMED_WAITING Blocked count: 0 Waited count: 314 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 41527): State: TIMED_WAITING Blocked count: 0 Waited count: 314 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 41527): State: TIMED_WAITING Blocked count: 0 Waited count: 314 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 41527): State: TIMED_WAITING Blocked count: 0 Waited count: 314 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data3)): State: TIMED_WAITING Blocked count: 31 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data1)): State: TIMED_WAITING Blocked count: 33 Waited count: 2 Stack: 
java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 187 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data4)): State: TIMED_WAITING Blocked count: 25 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 188 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data2)): State: TIMED_WAITING Blocked count: 25 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 194 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data2/current/BP-747981782-172.17.0.2-1734226417326): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 197 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data4/current/BP-747981782-172.17.0.2-1734226417326): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 199 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data1/current/BP-747981782-172.17.0.2-1734226417326): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data3/current/BP-747981782-172.17.0.2-1734226417326): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 211 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 212 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 217 (java.util.concurrent.ThreadPoolExecutor$Worker@1e372f9b[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 218 (java.util.concurrent.ThreadPoolExecutor$Worker@358d9b23[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 219 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 220 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data6)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 225 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data6/current/BP-747981782-172.17.0.2-1734226417326): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 226 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data5/current/BP-747981782-172.17.0.2-1734226417326): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 230 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 234 (java.util.concurrent.ThreadPoolExecutor$Worker@47e47bbb[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 235 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtility.java:599) Thread 238 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 237 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:64248): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 236 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 32 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 240 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 157 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 241 (SyncThread:0): State: WAITING Blocked count: 29 Waited count: 657 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@76aed49e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 242 (ProcessThread(sid:0 cport:64248):): State: WAITING Blocked count: 1 Waited count: 768 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@51e4ff6a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 243 (RequestThrottler): State: WAITING Blocked count: 1 Waited count: 798 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@bff6515 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 244 (NIOWorkerThread-1): State: WAITING Blocked count: 6 Waited count: 131 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e007a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 2 Waited count: 286 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (RS-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 16 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (Time-limited test-SendThread(127.0.0.1:64248)): State: RUNNABLE Blocked count: 13 Waited count: 0 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 259 (Time-limited test-EventThread): State: WAITING Blocked count: 20 Waited count: 57 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@484eceed Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 260 (NIOWorkerThread-2): State: WAITING Blocked count: 4 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (NIOWorkerThread-3): State: WAITING Blocked count: 5 Waited count: 131 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (NIOWorkerThread-4): State: WAITING Blocked count: 2 Waited count: 131 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (zk-event-processor-pool-0): State: WAITING Blocked count: 34 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@54b25a4f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-5): State: WAITING Blocked count: 3 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-6): State: WAITING Blocked count: 9 Waited count: 131 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-7): State: WAITING Blocked count: 7 Waited count: 131 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-8): State: WAITING Blocked count: 4 Waited count: 130 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-9): State: WAITING Blocked count: 4 Waited count: 130 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-10): State: WAITING Blocked count: 7 Waited count: 130 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-11): State: WAITING Blocked count: 3 Waited count: 130 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-12): State: WAITING Blocked count: 4 Waited count: 130 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-13): State: WAITING Blocked count: 7 Waited count: 131 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-14): State: WAITING Blocked count: 4 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-15): State: WAITING Blocked count: 7 Waited count: 130 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-16): State: WAITING Blocked count: 1 Waited count: 130 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40789): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@19ae83e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789): State: WAITING Blocked count: 137 Waited count: 550 Waiting on java.util.concurrent.Semaphore$NonfairSync@6fa86368 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789): State: WAITING Blocked count: 168 Waited count: 594 Waiting on java.util.concurrent.Semaphore$NonfairSync@51aff1d6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 279 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40789): State: WAITING Blocked count: 65 Waited count: 6086 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1789b98e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 280 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40789): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7a55b92a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 281 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40789): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7a55b92a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 282 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=40789): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@56ca262b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=40789): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@78c54c65 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=40789): 
State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@22dacf3a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 285 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=40789): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@7591bea2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 289 (RS-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 311 (RS-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 333 (RS-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 113 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 287 (M:0;66d575e28c61:40789): State: TIMED_WAITING Blocked count: 6 Waited count: 2507 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:879) app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doSync(AsyncFSWAL.java:671) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$1(AbstractFSWAL.java:600) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$970/0x00007f4f40f1c470.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:600) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:590) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2869) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2811) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2670) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2644) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2635) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1810) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1631) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1586) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1569) Thread 356 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 32 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 358 (master/66d575e28c61:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 360 (master/66d575e28c61:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 362 (org.apache.hadoop.hdfs.PeerCache@2a374a6e): State: TIMED_WAITING Blocked count: 0 Waited count: 104 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 380 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 3097 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:179) Thread 397 (RS-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 83 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 398 (RS-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 104 Waited count: 2 Stack: 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 411 (Idle-Rpc-Conn-Sweeper-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 55 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 422 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 31 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 410 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 30897 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 431 (RS-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 19 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 432 (RS-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 30 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 457 (RegionServerTracker-0): State: WAITING Blocked count: 8 Waited count: 11 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@58302821 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 475 (regionserver/66d575e28c61:0.procedureResultReporter): State: WAITING Blocked count: 12 Waited count: 25 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7e51ddb1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 478 (regionserver/66d575e28c61:0.procedureResultReporter): State: WAITING Blocked count: 16 Waited count: 33 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46b65c10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 483 (regionserver/66d575e28c61:0.procedureResultReporter): State: WAITING Blocked count: 13 Waited count: 27 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@78ab7a0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 513 (LeaseRenewer:jenkins.hfs.2@localhost:33329): State: TIMED_WAITING Blocked count: 10 Waited count: 322 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 512 (LeaseRenewer:jenkins.hfs.1@localhost:33329): State: TIMED_WAITING Blocked count: 9 Waited count: 322 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 514 
(LeaseRenewer:jenkins.hfs.0@localhost:33329): State: TIMED_WAITING Blocked count: 9 Waited count: 323 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 519 (RS-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 530 (RS-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 532 (region-location-0): State: WAITING Blocked count: 8 Waited count: 15 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5ea2f650 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 559 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 30682 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 581 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 591 (RS-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 595 (ForkJoinPool.commonPool-worker-1): State: WAITING Blocked count: 0 Waited count: 761 Waiting on java.util.concurrent.ForkJoinPool@2e6e0460 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 605 (ForkJoinPool.commonPool-worker-3): State: WAITING Blocked count: 0 
Waited count: 363 Waiting on java.util.concurrent.ForkJoinPool@2e6e0460 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 606 (region-location-1): State: WAITING Blocked count: 5 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5ea2f650 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 607 (region-location-2): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5ea2f650 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 608 (region-location-3): State: WAITING Blocked count: 2 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5ea2f650 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1015 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 337 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1081 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 11 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1118 (RS-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1120 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 62 Waited count: 93 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@829102f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1178 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1179 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1531 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 33 Waiting on java.util.TaskQueue@41f0670c Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 2381 (ForkJoinPool.commonPool-worker-4): State: TIMED_WAITING Blocked count: 0 Waited count: 355 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 3260 (region-location-4): State: WAITING Blocked count: 3 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5ea2f650 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4772 (ForkJoinPool.commonPool-worker-5): State: WAITING Blocked count: 0 Waited count: 64 Waiting on java.util.concurrent.ForkJoinPool@2e6e0460 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 4946 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4947 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4948 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8929 (AsyncFSWAL-1-hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/MasterData-prefix:66d575e28c61,40789,1734226421776): State: WAITING Blocked count: 0 Waited count: 1 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1f632656 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8934 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-12-15T01:39:10,820 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-15T01:39:40,821 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
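The per-thread "State", "Blocked count" and "Waited count" fields in the dump above are the standard counters exposed through java.lang.management; the dumping thread itself is visible in the dump as Thread 22, parked inside sun.management.ThreadImpl.getThreadInfo under org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo. A minimal, JDK-only sketch that prints the same fields is given below; the class name ThreadDumpSketch and the exact formatting are illustrative assumptions, not HBase's own printThreadInfo implementation.

    import java.lang.management.ManagementFactory;
    import java.lang.management.ThreadInfo;
    import java.lang.management.ThreadMXBean;

    // Illustrative sketch: prints Thread/State/Blocked count/Waited count/Stack
    // for every live thread, i.e. the same fields that appear in the dump above.
    public class ThreadDumpSketch {
        public static void main(String[] args) {
            ThreadMXBean mx = ManagementFactory.getThreadMXBean();
            // Blocked/waited *counts* are always maintained; contention monitoring
            // is only needed for blocked/waited *times*, enabled here just in case.
            if (mx.isThreadContentionMonitoringSupported()) {
                mx.setThreadContentionMonitoringEnabled(true);
            }
            // dumpAllThreads(lockedMonitors, lockedSynchronizers)
            for (ThreadInfo info : mx.dumpAllThreads(false, false)) {
                System.out.printf("Thread %d (%s):%n", info.getThreadId(), info.getThreadName());
                System.out.println("  State: " + info.getThreadState());
                System.out.println("  Blocked count: " + info.getBlockedCount());
                System.out.println("  Waited count: " + info.getWaitedCount());
                if (info.getLockName() != null) {
                    System.out.println("  Waiting on " + info.getLockName());
                }
                System.out.println("  Stack:");
                for (StackTraceElement frame : info.getStackTrace()) {
                    System.out.println("    " + frame);
                }
            }
        }
    }

Run against any JVM, this prints one block per thread in the same layout used by the periodic dumps in this log.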
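Most WAITING entries above (NIOWorkerThread-*, the RpcServer.* handlers, region-location-*) share one stack shape: LockSupport.park reached through AbstractQueuedSynchronizer from either LinkedBlockingQueue.take or Semaphore.acquire, which, as far as these stacks show, is simply idle pool workers or handlers waiting for their next task rather than contended threads; the one notably different thread is Thread 287 (M:0;66d575e28c61:40789), TIMED_WAITING in SyncFuture.get during a WAL sync while closing a region. A small self-contained sketch reproducing the idle-worker stack shape follows; the names IdleWorkerSketch and idle-worker-sketch are hypothetical, chosen only for illustration.

    import java.util.concurrent.LinkedBlockingQueue;

    public class IdleWorkerSketch {
        public static void main(String[] args) throws Exception {
            LinkedBlockingQueue<Runnable> queue = new LinkedBlockingQueue<>();
            Thread worker = new Thread(() -> {
                try {
                    // Parks inside queue.take() until a task arrives; a thread dump taken
                    // now shows State: WAITING with LockSupport.park and
                    // AbstractQueuedSynchronizer$ConditionObject.await frames, matching
                    // the NIOWorkerThread-* entries above.
                    Runnable task = queue.take();
                    task.run();
                } catch (InterruptedException ie) {
                    Thread.currentThread().interrupt();
                }
            }, "idle-worker-sketch");
            worker.start();
            Thread.sleep(500);                        // give the worker time to park
            System.out.println(worker.getState());    // typically WAITING at this point
            queue.put(() -> System.out.println("task ran"));
            worker.join();
        }
    }

A dump taken while the worker is parked reports it as WAITING on the queue's internal ConditionObject, the same signature seen throughout the dump above.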
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;66d575e28c61:40789 221 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 12 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 28 Waited count: 12 Waiting on java.lang.ref.ReferenceQueue$Lock@631b7d9f Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 16 Waited count: 17 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: WAITING Blocked count: 0 Waited count: 19 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5234a66a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: RUNNABLE Blocked count: 3 Waited count: 20 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 3780 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 38 Waiting on java.util.concurrent.CountDownLatch$Sync@61226526 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 13486 Waited count: 13947 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:394) app//org.apache.hadoop.hbase.MiniHBaseCluster.waitUntilShutDown(MiniHBaseCluster.java:921) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniHBaseCluster(HBaseTestingUtility.java:1359) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniCluster(HBaseTestingUtility.java:1341) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 8 Waited count: 9 Waiting on java.lang.ref.ReferenceQueue$Lock@10fe4b5c Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@678eb8df Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@7299a68c): State: TIMED_WAITING Blocked count: 0 Waited count: 751 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 76 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1021651416-37): State: RUNNABLE Blocked count: 9 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f4f40428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1021651416-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f4f40428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1021651416-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f4f40428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1021651416-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f4f40428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1021651416-41-acceptor-0@752a6971-ServerConnector@2847eba1{HTTP/1.1, (http/1.1)}{localhost:35059}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1021651416-42): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1021651416-43): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1021651416-44): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-56b4614a-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 22 Waited count: 2907 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@41796701 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 33329): State: TIMED_WAITING Blocked count: 1 Waited 
count: 39 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 75 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@5d1d1282): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 126 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@7f2d48e5): State: TIMED_WAITING Blocked count: 0 Waited count: 75 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 128 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 36985 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 2 Waited count: 1235 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@45ab02b1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 33329): State: TIMED_WAITING Blocked count: 109 Waited count: 2183 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 33329): State: TIMED_WAITING Blocked count: 86 Waited count: 2185 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 33329): State: TIMED_WAITING Blocked count: 118 Waited count: 2174 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 33329): State: TIMED_WAITING Blocked count: 106 Waited count: 2168 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 33329): State: TIMED_WAITING Blocked count: 97 Waited count: 2169 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@ab4ee67): State: TIMED_WAITING Blocked count: 0 Waited count: 188 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@2e0c8d43): State: TIMED_WAITING Blocked count: 0 Waited count: 75 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@2ae35f71): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@63bf282f): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(1603592889)): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp1388150912-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f4f40428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp1388150912-88-acceptor-0@754170a3-ServerConnector@120fd64b{HTTP/1.1, (http/1.1)}{localhost:41125}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp1388150912-89): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp1388150912-90): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-7e10f8ad-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@95a30d6): State: TIMED_WAITING Blocked count: 0 Waited count: 748 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 42339): State: TIMED_WAITING Blocked count: 1 Waited count: 39 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 75 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 1 Waited count: 320 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@79c92cfb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-747981782-172.17.0.2-1734226417326 heartbeating to localhost/127.0.0.1:33329): State: TIMED_WAITING Blocked count: 1372 Waited count: 1379 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@1340d90b): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) 
Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 42339): State: TIMED_WAITING Blocked count: 0 Waited count: 378 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 42339): State: TIMED_WAITING Blocked count: 0 Waited count: 381 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 42339): State: TIMED_WAITING Blocked count: 0 Waited count: 376 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 42339): State: TIMED_WAITING Blocked count: 0 Waited count: 377 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 42339): State: TIMED_WAITING Blocked count: 0 Waited count: 374 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 118 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (qtp1956370377-119): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f4f40428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp1956370377-120-acceptor-0@7570cd01-ServerConnector@423c535d{HTTP/1.1, (http/1.1)}{localhost:36059}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp1956370377-121): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp1956370377-122): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (Session-HouseKeeper-2cc38048-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (IPC Client (309020234) connection to localhost/127.0.0.1:33329 from jenkins): State: TIMED_WAITING Blocked count: 1243 Waited count: 1244 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 125 (IPC Parameter Sending Thread for localhost/127.0.0.1:33329): State: TIMED_WAITING Blocked count: 0 Waited count: 1936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@f6c94d4): State: TIMED_WAITING Blocked count: 0 Waited count: 748 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 44765): State: TIMED_WAITING Blocked count: 1 Waited count: 39 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 75 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 1 Waited count: 278 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6be6fb02 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-747981782-172.17.0.2-1734226417326 heartbeating to localhost/127.0.0.1:33329): State: TIMED_WAITING Blocked count: 1347 Waited count: 1375 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@72b5e237): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 44765): State: TIMED_WAITING Blocked count: 0 Waited count: 414 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 44765): State: TIMED_WAITING Blocked count: 0 Waited count: 391 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 44765): State: TIMED_WAITING Blocked count: 0 Waited count: 463 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 44765): State: TIMED_WAITING Blocked count: 0 Waited count: 406 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 44765): State: TIMED_WAITING Blocked count: 0 Waited count: 404 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp395497086-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f4f40428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp395497086-154-acceptor-0@4e85d132-ServerConnector@27188009{HTTP/1.1, (http/1.1)}{localhost:34737}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp395497086-155): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp395497086-156): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-83efa6e-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@27b376d6): State: TIMED_WAITING Blocked count: 0 Waited count: 748 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 41527): State: TIMED_WAITING Blocked count: 1 Waited count: 39 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 75 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 0 Waited count: 281 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@581883eb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-747981782-172.17.0.2-1734226417326 heartbeating to localhost/127.0.0.1:33329): State: TIMED_WAITING Blocked count: 1245 Waited count: 1371 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@677edd20): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 41527): State: TIMED_WAITING Blocked count: 0 Waited count: 374 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 41527): State: TIMED_WAITING Blocked count: 0 Waited count: 374 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 41527): State: TIMED_WAITING Blocked count: 0 Waited count: 374 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 41527): State: TIMED_WAITING Blocked count: 0 Waited count: 374 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 41527): State: TIMED_WAITING Blocked count: 0 Waited count: 374 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data3)): State: TIMED_WAITING Blocked count: 31 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data1)): State: TIMED_WAITING Blocked count: 33 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 187 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data4)): State: TIMED_WAITING Blocked count: 25 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 188 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data2)): State: TIMED_WAITING Blocked count: 25 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 194 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data2/current/BP-747981782-172.17.0.2-1734226417326): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 197 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data4/current/BP-747981782-172.17.0.2-1734226417326): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 199 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data1/current/BP-747981782-172.17.0.2-1734226417326): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data3/current/BP-747981782-172.17.0.2-1734226417326): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 211 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 212 
(pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 217 (java.util.concurrent.ThreadPoolExecutor$Worker@1e372f9b[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 218 (java.util.concurrent.ThreadPoolExecutor$Worker@358d9b23[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 219 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 220 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data6)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 225 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data6/current/BP-747981782-172.17.0.2-1734226417326): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 226 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data5/current/BP-747981782-172.17.0.2-1734226417326): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 230 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 234 (java.util.concurrent.ThreadPoolExecutor$Worker@47e47bbb[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 235 
(FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtility.java:599) Thread 238 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 237 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:64248): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 236 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 38 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 240 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 187 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 241 (SyncThread:0): State: WAITING Blocked count: 29 Waited count: 662 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@76aed49e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 242 (ProcessThread(sid:0 cport:64248):): State: WAITING Blocked count: 1 Waited count: 773 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@51e4ff6a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 243 (RequestThrottler): State: WAITING Blocked count: 1 Waited count: 803 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@bff6515 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 244 (NIOWorkerThread-1): State: WAITING Blocked count: 6 Waited count: 131 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e007a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 2 Waited count: 314 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (RS-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 16 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (Time-limited test-SendThread(127.0.0.1:64248)): State: RUNNABLE Blocked count: 13 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 259 (Time-limited test-EventThread): State: WAITING Blocked count: 20 Waited count: 57 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@484eceed Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 260 (NIOWorkerThread-2): State: WAITING Blocked count: 4 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (NIOWorkerThread-3): State: WAITING Blocked count: 5 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (NIOWorkerThread-4): State: WAITING Blocked count: 2 Waited count: 131 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (zk-event-processor-pool-0): State: WAITING Blocked count: 34 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@54b25a4f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-5): State: WAITING Blocked count: 3 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-6): State: WAITING Blocked count: 9 Waited count: 131 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-7): State: WAITING Blocked count: 7 Waited count: 131 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-8): State: WAITING Blocked count: 4 Waited count: 131 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 
(NIOWorkerThread-9): State: WAITING Blocked count: 4 Waited count: 131 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-10): State: WAITING Blocked count: 7 Waited count: 131 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-11): State: WAITING Blocked count: 3 Waited count: 131 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-12): State: WAITING Blocked count: 4 Waited count: 131 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-13): State: WAITING Blocked count: 7 Waited count: 131 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-14): State: WAITING Blocked count: 4 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-15): State: WAITING Blocked count: 7 Waited count: 131 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-16): State: WAITING Blocked count: 1 Waited count: 131 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40789): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@19ae83e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 277 
(RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789): State: WAITING Blocked count: 137 Waited count: 550 Waiting on java.util.concurrent.Semaphore$NonfairSync@6fa86368 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789): State: WAITING Blocked count: 168 Waited count: 594 Waiting on java.util.concurrent.Semaphore$NonfairSync@51aff1d6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 279 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40789): State: WAITING Blocked count: 65 Waited count: 6086 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1789b98e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 280 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40789): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7a55b92a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 281 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40789): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7a55b92a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 282 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=40789): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@56ca262b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=40789): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@78c54c65 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=40789): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@22dacf3a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) 
java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 285 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=40789): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@7591bea2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 289 (RS-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 311 (RS-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 333 (RS-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 113 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 287 (M:0;66d575e28c61:40789): State: TIMED_WAITING Blocked count: 6 Waited count: 2507 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:879) app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doSync(AsyncFSWAL.java:671) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$1(AbstractFSWAL.java:600) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$970/0x00007f4f40f1c470.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:600) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:590) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2869) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2811) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2670) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2644) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2635) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1810) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1631) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1586) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1569) Thread 356 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 38 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 358 (master/66d575e28c61:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 360 (master/66d575e28c61:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 362 (org.apache.hadoop.hdfs.PeerCache@2a374a6e): State: TIMED_WAITING Blocked count: 0 Waited count: 124 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 380 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 3696 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:179) Thread 397 (RS-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 83 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 398 (RS-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 104 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 411 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 68 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2cd291aa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 422 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 37 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 410 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 36899 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 431 (RS-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 19 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 432 (RS-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 30 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 457 (RegionServerTracker-0): State: WAITING Blocked count: 8 Waited count: 11 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@58302821 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 475 (regionserver/66d575e28c61:0.procedureResultReporter): State: WAITING Blocked count: 12 Waited count: 25 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7e51ddb1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 478 (regionserver/66d575e28c61:0.procedureResultReporter): State: WAITING Blocked count: 16 Waited count: 33 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46b65c10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 483 (regionserver/66d575e28c61:0.procedureResultReporter): State: WAITING Blocked count: 13 Waited count: 27 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@78ab7a0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 519 (RS-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 530 (RS-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 532 (region-location-0): State: WAITING Blocked count: 8 Waited count: 15 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5ea2f650 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 559 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 36685 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 581 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 591 (RS-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 595 (ForkJoinPool.commonPool-worker-1): State: TIMED_WAITING Blocked count: 0 Waited count: 762 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 605 (ForkJoinPool.commonPool-worker-3): State: WAITING Blocked count: 0 Waited count: 363 Waiting on java.util.concurrent.ForkJoinPool@2e6e0460 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 606 (region-location-1): State: WAITING Blocked count: 5 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5ea2f650 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 607 (region-location-2): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5ea2f650 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 608 (region-location-3): State: WAITING Blocked count: 2 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5ea2f650 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1015 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 343 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1081 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 11 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1118 (RS-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1120 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 62 Waited count: 93 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@829102f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1178 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1179 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1531 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 33 Waiting on java.util.TaskQueue@41f0670c Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 3260 (region-location-4): State: WAITING Blocked count: 3 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5ea2f650 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4772 (ForkJoinPool.commonPool-worker-5): State: WAITING Blocked count: 0 Waited count: 64 Waiting on java.util.concurrent.ForkJoinPool@2e6e0460 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 4946 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4947 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4948 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8929 (AsyncFSWAL-1-hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/MasterData-prefix:66d575e28c61,40789,1734226421776): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1f632656 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8934 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-12-15T01:40:10,821 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-15T01:40:40,821 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;66d575e28c61:40789 220 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 12 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 28 Waited count: 12 Waiting on java.lang.ref.ReferenceQueue$Lock@631b7d9f Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 16 Waited count: 18 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: WAITING Blocked count: 0 Waited count: 22 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5234a66a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: RUNNABLE Blocked count: 3 Waited count: 23 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 4380 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 44 Waiting on java.util.concurrent.CountDownLatch$Sync@2328a609 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 13486 Waited count: 13948 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:394) app//org.apache.hadoop.hbase.MiniHBaseCluster.waitUntilShutDown(MiniHBaseCluster.java:921) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniHBaseCluster(HBaseTestingUtility.java:1359) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniCluster(HBaseTestingUtility.java:1341) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 8 Waited count: 9 Waiting on java.lang.ref.ReferenceQueue$Lock@10fe4b5c Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@678eb8df Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@7299a68c): State: TIMED_WAITING Blocked count: 0 Waited count: 871 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 88 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1021651416-37): State: RUNNABLE Blocked count: 9 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f4f40428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1021651416-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f4f40428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1021651416-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f4f40428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1021651416-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f4f40428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1021651416-41-acceptor-0@752a6971-ServerConnector@2847eba1{HTTP/1.1, (http/1.1)}{localhost:35059}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1021651416-42): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1021651416-43): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1021651416-44): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-56b4614a-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 22 Waited count: 2907 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@41796701 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 33329): State: TIMED_WAITING Blocked count: 1 Waited 
count: 45 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 87 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@5d1d1282): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 146 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@7f2d48e5): State: TIMED_WAITING Blocked count: 0 Waited count: 87 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 148 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 42911 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 2 Waited count: 1235 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@45ab02b1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 33329): State: TIMED_WAITING Blocked count: 109 Waited count: 2243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 33329): State: TIMED_WAITING Blocked count: 86 Waited count: 2245 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 33329): State: TIMED_WAITING Blocked count: 118 Waited count: 2234 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 33329): State: TIMED_WAITING Blocked count: 106 Waited count: 2229 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 33329): State: TIMED_WAITING Blocked count: 97 Waited count: 2229 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@ab4ee67): State: TIMED_WAITING Blocked count: 0 Waited count: 218 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@2e0c8d43): State: TIMED_WAITING Blocked count: 0 Waited count: 87 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@2ae35f71): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@63bf282f): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(1603592889)): State: TIMED_WAITING Blocked count: 0 Waited count: 15 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp1388150912-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f4f40428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp1388150912-88-acceptor-0@754170a3-ServerConnector@120fd64b{HTTP/1.1, (http/1.1)}{localhost:41125}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp1388150912-89): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp1388150912-90): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-7e10f8ad-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@95a30d6): State: TIMED_WAITING Blocked count: 0 Waited count: 868 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 42339): State: TIMED_WAITING Blocked count: 1 Waited count: 45 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 87 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 1 Waited count: 340 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@79c92cfb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-747981782-172.17.0.2-1734226417326 heartbeating to localhost/127.0.0.1:33329): State: TIMED_WAITING Blocked count: 1392 Waited count: 1419 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@1340d90b): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) 
Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 42339): State: TIMED_WAITING Blocked count: 0 Waited count: 438 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 42339): State: TIMED_WAITING Blocked count: 0 Waited count: 441 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 42339): State: TIMED_WAITING Blocked count: 0 Waited count: 436 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 42339): State: TIMED_WAITING Blocked count: 0 Waited count: 437 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 42339): State: TIMED_WAITING Blocked count: 0 Waited count: 434 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 118 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (qtp1956370377-119): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f4f40428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp1956370377-120-acceptor-0@7570cd01-ServerConnector@423c535d{HTTP/1.1, (http/1.1)}{localhost:36059}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp1956370377-121): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp1956370377-122): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (Session-HouseKeeper-2cc38048-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (IPC Client (309020234) connection to localhost/127.0.0.1:33329 from jenkins): State: TIMED_WAITING Blocked count: 1303 Waited count: 1304 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 125 (IPC Parameter Sending Thread for localhost/127.0.0.1:33329): State: TIMED_WAITING Blocked count: 0 Waited count: 1996 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@f6c94d4): State: TIMED_WAITING Blocked count: 0 Waited count: 868 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 44765): State: TIMED_WAITING Blocked count: 1 Waited count: 45 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 87 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 1 Waited count: 298 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6be6fb02 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-747981782-172.17.0.2-1734226417326 heartbeating to localhost/127.0.0.1:33329): State: TIMED_WAITING Blocked count: 1367 Waited count: 1415 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@72b5e237): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 44765): State: TIMED_WAITING Blocked count: 0 Waited count: 483 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 44765): State: TIMED_WAITING Blocked count: 0 Waited count: 464 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 44765): State: TIMED_WAITING Blocked count: 0 Waited count: 536 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 44765): State: TIMED_WAITING Blocked count: 0 Waited count: 466 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 44765): State: TIMED_WAITING Blocked count: 0 Waited count: 491 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp395497086-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f4f40428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp395497086-154-acceptor-0@4e85d132-ServerConnector@27188009{HTTP/1.1, (http/1.1)}{localhost:34737}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp395497086-155): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp395497086-156): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-83efa6e-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@27b376d6): State: TIMED_WAITING Blocked count: 0 Waited count: 868 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 41527): State: TIMED_WAITING Blocked count: 1 Waited count: 45 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 87 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 0 Waited count: 301 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@581883eb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-747981782-172.17.0.2-1734226417326 heartbeating to localhost/127.0.0.1:33329): State: TIMED_WAITING Blocked count: 1265 Waited count: 1411 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@677edd20): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 41527): State: TIMED_WAITING Blocked count: 0 Waited count: 434 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 41527): State: TIMED_WAITING Blocked count: 0 Waited count: 435 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 41527): State: TIMED_WAITING Blocked count: 0 Waited count: 434 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 41527): State: TIMED_WAITING Blocked count: 0 Waited count: 434 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 41527): State: TIMED_WAITING Blocked count: 0 Waited count: 434 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data3)): State: TIMED_WAITING Blocked count: 31 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data1)): State: TIMED_WAITING Blocked count: 33 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 187 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data4)): State: TIMED_WAITING Blocked count: 25 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 188 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data2)): State: TIMED_WAITING Blocked count: 25 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 194 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data2/current/BP-747981782-172.17.0.2-1734226417326): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 197 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data4/current/BP-747981782-172.17.0.2-1734226417326): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 199 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data1/current/BP-747981782-172.17.0.2-1734226417326): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data3/current/BP-747981782-172.17.0.2-1734226417326): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 211 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 212 
(pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 217 (java.util.concurrent.ThreadPoolExecutor$Worker@1e372f9b[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 218 (java.util.concurrent.ThreadPoolExecutor$Worker@358d9b23[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 219 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 220 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data6)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 225 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data6/current/BP-747981782-172.17.0.2-1734226417326): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 226 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data5/current/BP-747981782-172.17.0.2-1734226417326): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 230 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 234 (java.util.concurrent.ThreadPoolExecutor$Worker@47e47bbb[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 235 
(FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 15 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtility.java:599) Thread 238 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 237 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:64248): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 236 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 44 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 240 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 217 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 241 (SyncThread:0): State: WAITING Blocked count: 29 Waited count: 666 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@76aed49e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 242 (ProcessThread(sid:0 cport:64248):): State: WAITING Blocked count: 1 Waited count: 777 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@51e4ff6a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 243 (RequestThrottler): State: WAITING Blocked count: 1 Waited count: 807 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@bff6515 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 244 (NIOWorkerThread-1): State: WAITING Blocked count: 6 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e007a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 2 Waited count: 342 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (RS-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 16 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (Time-limited test-SendThread(127.0.0.1:64248)): State: RUNNABLE Blocked count: 13 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 259 (Time-limited test-EventThread): State: WAITING Blocked count: 20 Waited count: 57 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@484eceed Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 260 (NIOWorkerThread-2): State: WAITING Blocked count: 4 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (NIOWorkerThread-3): State: WAITING Blocked count: 5 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (NIOWorkerThread-4): State: WAITING Blocked count: 2 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (zk-event-processor-pool-0): State: WAITING Blocked count: 34 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@54b25a4f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-5): State: WAITING Blocked count: 3 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-6): State: WAITING Blocked count: 9 Waited count: 132 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-7): State: WAITING Blocked count: 7 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-8): State: WAITING Blocked count: 4 Waited count: 131 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 
(NIOWorkerThread-9): State: WAITING Blocked count: 4 Waited count: 131 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-10): State: WAITING Blocked count: 7 Waited count: 131 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-11): State: WAITING Blocked count: 3 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-12): State: WAITING Blocked count: 4 Waited count: 131 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-13): State: WAITING Blocked count: 7 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-14): State: WAITING Blocked count: 4 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-15): State: WAITING Blocked count: 7 Waited count: 131 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-16): State: WAITING Blocked count: 1 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40789): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@19ae83e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 277 
(RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789): State: WAITING Blocked count: 137 Waited count: 550 Waiting on java.util.concurrent.Semaphore$NonfairSync@6fa86368 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789): State: WAITING Blocked count: 168 Waited count: 594 Waiting on java.util.concurrent.Semaphore$NonfairSync@51aff1d6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 279 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40789): State: WAITING Blocked count: 65 Waited count: 6086 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1789b98e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 280 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40789): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7a55b92a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 281 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40789): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7a55b92a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 282 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=40789): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@56ca262b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=40789): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@78c54c65 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=40789): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@22dacf3a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) 
java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 285 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=40789): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@7591bea2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 289 (RS-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 311 (RS-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 333 (RS-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 113 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 287 (M:0;66d575e28c61:40789): State: TIMED_WAITING Blocked count: 6 Waited count: 2507 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:879) app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doSync(AsyncFSWAL.java:671) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$1(AbstractFSWAL.java:600) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$970/0x00007f4f40f1c470.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:600) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:590) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2869) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2811) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2670) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2644) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2635) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1810) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1631) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1586) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1569) Thread 356 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 44 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 358 (master/66d575e28c61:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 360 (master/66d575e28c61:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 362 (org.apache.hadoop.hdfs.PeerCache@2a374a6e): State: TIMED_WAITING Blocked count: 0 Waited count: 144 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 380 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 4296 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:179) Thread 397 (RS-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 83 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 398 (RS-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 104 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 411 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 68 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2cd291aa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 422 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 43 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 410 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 42902 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 431 (RS-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 19 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 432 (RS-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 30 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 457 (RegionServerTracker-0): State: WAITING Blocked count: 8 Waited count: 11 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@58302821 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 475 (regionserver/66d575e28c61:0.procedureResultReporter): State: WAITING Blocked count: 12 Waited count: 25 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7e51ddb1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 478 (regionserver/66d575e28c61:0.procedureResultReporter): State: WAITING Blocked count: 16 Waited count: 33 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46b65c10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 483 (regionserver/66d575e28c61:0.procedureResultReporter): State: WAITING Blocked count: 13 Waited count: 27 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@78ab7a0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 519 (RS-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 530 (RS-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 532 (region-location-0): State: WAITING Blocked count: 8 Waited count: 15 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5ea2f650 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 559 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 42687 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 581 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 591 (RS-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 605 (ForkJoinPool.commonPool-worker-3): State: WAITING Blocked count: 0 Waited count: 363 Waiting on java.util.concurrent.ForkJoinPool@2e6e0460 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 606 (region-location-1): State: WAITING Blocked count: 5 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5ea2f650 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 607 (region-location-2): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5ea2f650 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 608 (region-location-3): State: WAITING Blocked count: 2 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5ea2f650 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1015 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 349 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1081 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 11 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native 
Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1118 (RS-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1120 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 62 Waited count: 93 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@829102f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1178 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1179 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1531 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 33 Waiting on java.util.TaskQueue@41f0670c Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 3260 (region-location-4): State: WAITING Blocked count: 3 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5ea2f650 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4772 (ForkJoinPool.commonPool-worker-5): State: TIMED_WAITING Blocked count: 0 Waited count: 65 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 4946 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4947 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4948 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8929 (AsyncFSWAL-1-hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/MasterData-prefix:66d575e28c61,40789,1734226421776): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1f632656 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8934 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-12-15T01:41:10,822 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-15T01:41:40,822 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;66d575e28c61:40789 219 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 12 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 28 Waited count: 12 Waiting on java.lang.ref.ReferenceQueue$Lock@631b7d9f Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 16 Waited count: 19 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: WAITING Blocked count: 0 Waited count: 25 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5234a66a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: RUNNABLE Blocked count: 3 Waited count: 26 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 4979 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 50 Waiting on java.util.concurrent.CountDownLatch$Sync@25ff75ac Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 13486 Waited count: 13949 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:394) app//org.apache.hadoop.hbase.MiniHBaseCluster.waitUntilShutDown(MiniHBaseCluster.java:921) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniHBaseCluster(HBaseTestingUtility.java:1359) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniCluster(HBaseTestingUtility.java:1341) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 8 Waited count: 9 Waiting on java.lang.ref.ReferenceQueue$Lock@10fe4b5c Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@678eb8df Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@7299a68c): State: TIMED_WAITING Blocked count: 0 Waited count: 991 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 100 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1021651416-37): State: RUNNABLE Blocked count: 9 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f4f40428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1021651416-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f4f40428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1021651416-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f4f40428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1021651416-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f4f40428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1021651416-41-acceptor-0@752a6971-ServerConnector@2847eba1{HTTP/1.1, (http/1.1)}{localhost:35059}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1021651416-42): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1021651416-43): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1021651416-44): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-56b4614a-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 22 Waited count: 2907 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@41796701 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 33329): State: TIMED_WAITING Blocked count: 1 Waited 
count: 51 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 99 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@5d1d1282): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 166 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@7f2d48e5): State: TIMED_WAITING Blocked count: 0 Waited count: 99 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 168 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 48837 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 2 Waited count: 1235 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@45ab02b1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 33329): State: TIMED_WAITING Blocked count: 109 Waited count: 2304 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 33329): State: TIMED_WAITING Blocked count: 86 Waited count: 2305 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 33329): State: TIMED_WAITING Blocked count: 118 Waited count: 2294 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 33329): State: TIMED_WAITING Blocked count: 106 Waited count: 2289 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 33329): State: TIMED_WAITING Blocked count: 97 Waited count: 2289 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@ab4ee67): State: TIMED_WAITING Blocked count: 0 Waited count: 248 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@2e0c8d43): State: TIMED_WAITING Blocked count: 0 Waited count: 99 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@2ae35f71): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@63bf282f): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(1603592889)): State: TIMED_WAITING Blocked count: 0 Waited count: 17 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp1388150912-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f4f40428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp1388150912-88-acceptor-0@754170a3-ServerConnector@120fd64b{HTTP/1.1, (http/1.1)}{localhost:41125}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp1388150912-89): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp1388150912-90): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-7e10f8ad-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@95a30d6): State: TIMED_WAITING Blocked count: 0 Waited count: 988 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 42339): State: TIMED_WAITING Blocked count: 1 Waited count: 51 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 99 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 1 Waited count: 360 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@79c92cfb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-747981782-172.17.0.2-1734226417326 heartbeating to localhost/127.0.0.1:33329): State: TIMED_WAITING Blocked count: 1412 Waited count: 1459 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@1340d90b): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) 
Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 42339): State: TIMED_WAITING Blocked count: 0 Waited count: 499 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 42339): State: TIMED_WAITING Blocked count: 0 Waited count: 502 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 42339): State: TIMED_WAITING Blocked count: 0 Waited count: 498 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 42339): State: TIMED_WAITING Blocked count: 0 Waited count: 497 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 42339): State: TIMED_WAITING Blocked count: 0 Waited count: 496 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 118 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (qtp1956370377-119): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f4f40428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp1956370377-120-acceptor-0@7570cd01-ServerConnector@423c535d{HTTP/1.1, (http/1.1)}{localhost:36059}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp1956370377-121): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp1956370377-122): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (Session-HouseKeeper-2cc38048-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (IPC Client (309020234) connection to localhost/127.0.0.1:33329 from jenkins): State: TIMED_WAITING Blocked count: 1363 Waited count: 1364 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 125 (IPC Parameter Sending Thread for localhost/127.0.0.1:33329): State: TIMED_WAITING Blocked count: 0 Waited count: 2056 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@f6c94d4): State: TIMED_WAITING Blocked count: 0 Waited count: 988 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 44765): State: TIMED_WAITING Blocked count: 1 Waited count: 51 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 99 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 1 Waited count: 318 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6be6fb02 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-747981782-172.17.0.2-1734226417326 heartbeating to localhost/127.0.0.1:33329): State: TIMED_WAITING Blocked count: 1387 Waited count: 1455 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@72b5e237): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 44765): State: TIMED_WAITING Blocked count: 0 Waited count: 543 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 44765): State: TIMED_WAITING Blocked count: 0 Waited count: 524 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 44765): State: TIMED_WAITING Blocked count: 0 Waited count: 600 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 44765): State: TIMED_WAITING Blocked count: 0 Waited count: 526 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 44765): State: TIMED_WAITING Blocked count: 0 Waited count: 571 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp395497086-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f4f40428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp395497086-154-acceptor-0@4e85d132-ServerConnector@27188009{HTTP/1.1, (http/1.1)}{localhost:34737}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp395497086-155): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp395497086-156): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-83efa6e-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@27b376d6): State: TIMED_WAITING Blocked count: 0 Waited count: 988 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 41527): State: TIMED_WAITING Blocked count: 1 Waited count: 51 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 99 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 0 Waited count: 321 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@581883eb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-747981782-172.17.0.2-1734226417326 heartbeating to localhost/127.0.0.1:33329): State: TIMED_WAITING Blocked count: 1285 Waited count: 1451 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@677edd20): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 41527): State: TIMED_WAITING Blocked count: 0 Waited count: 494 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 41527): State: TIMED_WAITING Blocked count: 0 Waited count: 495 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 41527): State: TIMED_WAITING Blocked count: 0 Waited count: 494 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 41527): State: TIMED_WAITING Blocked count: 0 Waited count: 494 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 41527): State: TIMED_WAITING Blocked count: 0 Waited count: 494 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data3)): State: TIMED_WAITING Blocked count: 31 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data1)): State: TIMED_WAITING Blocked count: 33 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 187 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data4)): State: TIMED_WAITING Blocked count: 25 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 188 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data2)): State: TIMED_WAITING Blocked count: 25 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 194 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data2/current/BP-747981782-172.17.0.2-1734226417326): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 197 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data4/current/BP-747981782-172.17.0.2-1734226417326): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 199 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data1/current/BP-747981782-172.17.0.2-1734226417326): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data3/current/BP-747981782-172.17.0.2-1734226417326): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 211 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 212 
(pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 217 (java.util.concurrent.ThreadPoolExecutor$Worker@1e372f9b[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 218 (java.util.concurrent.ThreadPoolExecutor$Worker@358d9b23[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 219 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 220 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data6)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 225 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data6/current/BP-747981782-172.17.0.2-1734226417326): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 226 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data5/current/BP-747981782-172.17.0.2-1734226417326): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 230 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 234 (java.util.concurrent.ThreadPoolExecutor$Worker@47e47bbb[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 235 
(FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 17 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtility.java:599) Thread 238 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 237 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:64248): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 236 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 50 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 240 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 247 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 241 (SyncThread:0): State: WAITING Blocked count: 29 Waited count: 670 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@76aed49e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 242 (ProcessThread(sid:0 cport:64248):): State: WAITING Blocked count: 1 Waited count: 781 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@51e4ff6a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 243 (RequestThrottler): State: WAITING Blocked count: 1 Waited count: 811 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@bff6515 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 244 (NIOWorkerThread-1): State: WAITING Blocked count: 6 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e007a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 2 Waited count: 370 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (RS-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 16 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (Time-limited test-SendThread(127.0.0.1:64248)): State: RUNNABLE Blocked count: 13 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 259 (Time-limited test-EventThread): State: WAITING Blocked count: 20 Waited count: 57 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@484eceed Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 260 (NIOWorkerThread-2): State: WAITING Blocked count: 4 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (NIOWorkerThread-3): State: WAITING Blocked count: 5 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (NIOWorkerThread-4): State: WAITING Blocked count: 2 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (zk-event-processor-pool-0): State: WAITING Blocked count: 34 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@54b25a4f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-5): State: WAITING Blocked count: 3 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-6): State: WAITING Blocked count: 9 Waited count: 132 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-7): State: WAITING Blocked count: 7 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-8): State: WAITING Blocked count: 4 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 
(NIOWorkerThread-9): State: WAITING Blocked count: 4 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-10): State: WAITING Blocked count: 7 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-11): State: WAITING Blocked count: 3 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-12): State: WAITING Blocked count: 4 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-13): State: WAITING Blocked count: 7 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-14): State: WAITING Blocked count: 4 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-15): State: WAITING Blocked count: 7 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-16): State: WAITING Blocked count: 1 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40789): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@19ae83e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 277 
(RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789): State: WAITING Blocked count: 137 Waited count: 550 Waiting on java.util.concurrent.Semaphore$NonfairSync@6fa86368 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789): State: WAITING Blocked count: 168 Waited count: 594 Waiting on java.util.concurrent.Semaphore$NonfairSync@51aff1d6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 279 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40789): State: WAITING Blocked count: 65 Waited count: 6086 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1789b98e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 280 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40789): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7a55b92a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 281 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40789): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7a55b92a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 282 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=40789): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@56ca262b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=40789): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@78c54c65 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=40789): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@22dacf3a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) 
java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 285 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=40789): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@7591bea2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 289 (RS-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 311 (RS-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 333 (RS-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 113 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 287 (M:0;66d575e28c61:40789): State: TIMED_WAITING Blocked count: 6 Waited count: 2507 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:879) app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doSync(AsyncFSWAL.java:671) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$1(AbstractFSWAL.java:600) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$970/0x00007f4f40f1c470.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:600) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:590) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2869) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2811) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2670) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2644) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2635) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1810) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1631) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1586) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1569) Thread 356 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 50 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 358 (master/66d575e28c61:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 360 (master/66d575e28c61:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 362 (org.apache.hadoop.hdfs.PeerCache@2a374a6e): State: TIMED_WAITING Blocked count: 0 Waited count: 164 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 380 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 4895 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:179) Thread 397 (RS-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 83 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 398 (RS-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 104 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 411 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 68 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2cd291aa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 422 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 49 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 410 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 48904 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 431 (RS-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 19 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 432 (RS-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 30 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 457 (RegionServerTracker-0): State: WAITING Blocked count: 8 Waited count: 11 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@58302821 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 475 (regionserver/66d575e28c61:0.procedureResultReporter): State: WAITING Blocked count: 12 Waited count: 25 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7e51ddb1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 478 (regionserver/66d575e28c61:0.procedureResultReporter): State: WAITING Blocked count: 16 Waited count: 33 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46b65c10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 483 (regionserver/66d575e28c61:0.procedureResultReporter): State: WAITING Blocked count: 13 Waited count: 27 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@78ab7a0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 519 (RS-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 530 (RS-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 532 (region-location-0): State: WAITING Blocked count: 8 Waited count: 15 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5ea2f650 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 559 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 48689 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 581 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 591 (RS-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 605 (ForkJoinPool.commonPool-worker-3): State: TIMED_WAITING Blocked count: 0 Waited count: 364 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 606 (region-location-1): State: WAITING Blocked count: 5 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5ea2f650 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 607 (region-location-2): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5ea2f650 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 608 (region-location-3): State: WAITING Blocked count: 2 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5ea2f650 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1015 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 355 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1081 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 11 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native 
Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1118 (RS-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1120 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 62 Waited count: 93 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@829102f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1178 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1179 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1531 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 33 Waiting on java.util.TaskQueue@41f0670c Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 3260 (region-location-4): State: WAITING Blocked count: 3 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5ea2f650 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4946 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4947 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4948 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8929 
(AsyncFSWAL-1-hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/MasterData-prefix:66d575e28c61,40789,1734226421776): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1f632656 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8934 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 19 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-12-15T01:42:10,822 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might be because your Hadoop version > 3.2.3 or 3.3.4, see HBASE-27595 for details. 2024-12-15T01:42:40,823 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might be because your Hadoop version > 3.2.3 or 3.3.4, see HBASE-27595 for details. 2024-12-15T01:42:53,191 DEBUG [M:0;66d575e28c61:40789 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-15T01:42:53,191 WARN [M:0;66d575e28c61:40789 {}] region.MasterRegion(134): Failed to close region org.apache.hadoop.hbase.regionserver.wal.WALSyncTimeoutIOException: org.apache.hadoop.hbase.exceptions.TimeoutIOException: Failed to get sync result after 300000 ms for txid=3722, WAL system stuck? at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:883) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doSync(AsyncFSWAL.java:671) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$1(AbstractFSWAL.java:600) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:600) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:590) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2869) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2811) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2670) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2644) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2635) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1810) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1631) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1586) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1569) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.closeRegion(MasterRegion.java:132) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.close(MasterRegion.java:205) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.stopServiceThreads(HMaster.java:1758) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:1285) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:603) ~[classes/:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.exceptions.TimeoutIOException: Failed to get sync result after 300000 ms for txid=3722, WAL system stuck? at org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:171) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:879) ~[classes/:?] ... 20 more 2024-12-15T01:42:53,194 WARN [Close-WAL-Writer-0 {}] wal.AsyncProtobufLogWriter(163): normal close failed, try recover java.lang.NullPointerException: Cannot invoke "org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf.ensureWritable(int)" because "this.buf" is null at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.writeInt(FanOutOneBlockAsyncDFSOutput.java:396) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.lambda$writeWALTrailerAndMagic$3(AsyncProtobufLogWriter.java:243) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.writeWALMetadata(AsyncProtobufLogWriter.java:201) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.writeWALTrailerAndMagic(AsyncProtobufLogWriter.java:236) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractProtobufLogWriter.writeWALTrailer(AbstractProtobufLogWriter.java:252) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:160) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.lambda$closeWriter$5(AsyncFSWAL.java:732) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T01:42:53,196 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-12-15T01:42:53,196 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-12-15T01:42:53,196 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file /user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/MasterData/WALs/66d575e28c61,40789,1734226421776/66d575e28c61%2C40789%2C1734226421776.1734226423403 2024-12-15T01:42:53,197 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=0 on file=/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/MasterData/WALs/66d575e28c61,40789,1734226421776/66d575e28c61%2C40789%2C1734226421776.1734226423403 after 1ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.recoverAndClose(FanOutOneBlockAsyncDFSOutput.java:610) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:164) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.lambda$closeWriter$5(AsyncFSWAL.java:732) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T01:42:53,198 WARN [Close-WAL-Writer-0 {}] wal.AsyncFSWAL(734): close old writer failed. java.io.InterruptedIOException: Operation cancelled at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.checkIfCancelled(RecoverLeaseFSUtils.java:269) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:159) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.recoverAndClose(FanOutOneBlockAsyncDFSOutput.java:610) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:164) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.lambda$closeWriter$5(AsyncFSWAL.java:732) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T01:42:53,198 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/MasterData/WALs/66d575e28c61,40789,1734226421776/66d575e28c61%2C40789%2C1734226421776.1734226423403 2024-12-15T01:42:53,198 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=0 on file=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/MasterData/WALs/66d575e28c61,40789,1734226421776/66d575e28c61%2C40789%2C1734226421776.1734226423403 after 0ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.recoverLease(AsyncFSWAL.java:722) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.lambda$closeWriter$5(AsyncFSWAL.java:735) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;66d575e28c61:40789 220 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 12 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 28 Waited count: 12 Waiting on java.lang.ref.ReferenceQueue$Lock@631b7d9f Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 16 Waited count: 20 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: WAITING Blocked count: 0 Waited count: 28 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5234a66a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: RUNNABLE Blocked count: 3 Waited count: 30 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 5579 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 56 Waiting on java.util.concurrent.CountDownLatch$Sync@51cb40aa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 13486 Waited count: 13950 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:394) app//org.apache.hadoop.hbase.MiniHBaseCluster.waitUntilShutDown(MiniHBaseCluster.java:921) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniHBaseCluster(HBaseTestingUtility.java:1359) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniCluster(HBaseTestingUtility.java:1341) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 8 Waited count: 9 Waiting on java.lang.ref.ReferenceQueue$Lock@10fe4b5c Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@678eb8df Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@7299a68c): State: TIMED_WAITING Blocked count: 0 Waited count: 1111 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 112 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1021651416-37): State: RUNNABLE Blocked count: 9 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f4f40428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1021651416-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f4f40428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1021651416-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f4f40428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1021651416-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f4f40428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1021651416-41-acceptor-0@752a6971-ServerConnector@2847eba1{HTTP/1.1, (http/1.1)}{localhost:35059}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1021651416-42): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1021651416-43): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1021651416-44): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-56b4614a-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 22 Waited count: 2907 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@41796701 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 33329): State: TIMED_WAITING Blocked count: 1 Waited 
count: 57 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 111 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@5d1d1282): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 186 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@7f2d48e5): State: TIMED_WAITING Blocked count: 0 Waited count: 111 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 188 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 54763 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 2 Waited count: 1235 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@45ab02b1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 33329): State: TIMED_WAITING Blocked count: 109 Waited count: 2364 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 33329): State: TIMED_WAITING Blocked count: 86 Waited count: 2366 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 33329): State: TIMED_WAITING Blocked count: 118 Waited count: 2354 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 33329): State: TIMED_WAITING Blocked count: 106 Waited count: 2349 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 33329): State: TIMED_WAITING Blocked count: 97 Waited count: 2349 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@ab4ee67): State: TIMED_WAITING Blocked count: 0 Waited count: 278 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@2e0c8d43): State: TIMED_WAITING Blocked count: 0 Waited count: 111 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@2ae35f71): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@63bf282f): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(1603592889)): State: TIMED_WAITING Blocked count: 0 Waited count: 19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp1388150912-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f4f40428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp1388150912-88-acceptor-0@754170a3-ServerConnector@120fd64b{HTTP/1.1, (http/1.1)}{localhost:41125}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp1388150912-89): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp1388150912-90): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-7e10f8ad-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@95a30d6): State: TIMED_WAITING Blocked count: 0 Waited count: 1108 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 42339): State: TIMED_WAITING Blocked count: 1 Waited count: 57 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 111 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 1 Waited count: 380 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@79c92cfb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-747981782-172.17.0.2-1734226417326 heartbeating to localhost/127.0.0.1:33329): State: TIMED_WAITING Blocked count: 1432 Waited count: 1499 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@1340d90b): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) 
Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 42339): State: TIMED_WAITING Blocked count: 0 Waited count: 570 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 42339): State: TIMED_WAITING Blocked count: 0 Waited count: 578 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 42339): State: TIMED_WAITING Blocked count: 0 Waited count: 565 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 42339): State: TIMED_WAITING Blocked count: 0 Waited count: 557 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 42339): State: TIMED_WAITING Blocked count: 0 Waited count: 557 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 118 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (qtp1956370377-119): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f4f40428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp1956370377-120-acceptor-0@7570cd01-ServerConnector@423c535d{HTTP/1.1, (http/1.1)}{localhost:36059}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp1956370377-121): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp1956370377-122): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (Session-HouseKeeper-2cc38048-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (IPC Client (309020234) connection to localhost/127.0.0.1:33329 from jenkins): State: TIMED_WAITING Blocked count: 1423 Waited count: 1424 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 125 (IPC Parameter Sending Thread for localhost/127.0.0.1:33329): State: TIMED_WAITING Blocked count: 0 Waited count: 2116 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@f6c94d4): State: TIMED_WAITING Blocked count: 0 Waited count: 1108 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 44765): State: TIMED_WAITING Blocked count: 1 Waited count: 57 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 111 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 1 Waited count: 338 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6be6fb02 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-747981782-172.17.0.2-1734226417326 heartbeating to localhost/127.0.0.1:33329): State: TIMED_WAITING Blocked count: 1407 Waited count: 1495 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@72b5e237): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 44765): State: TIMED_WAITING Blocked count: 0 Waited count: 603 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 44765): State: TIMED_WAITING Blocked count: 0 Waited count: 584 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 44765): State: TIMED_WAITING Blocked count: 0 Waited count: 660 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 44765): State: TIMED_WAITING Blocked count: 0 Waited count: 586 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 44765): State: TIMED_WAITING Blocked count: 0 Waited count: 631 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp395497086-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f4f40428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp395497086-154-acceptor-0@4e85d132-ServerConnector@27188009{HTTP/1.1, (http/1.1)}{localhost:34737}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp395497086-155): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp395497086-156): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-83efa6e-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@27b376d6): State: TIMED_WAITING Blocked count: 0 Waited count: 1108 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 41527): State: TIMED_WAITING Blocked count: 1 Waited count: 57 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 111 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 0 Waited count: 341 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@581883eb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-747981782-172.17.0.2-1734226417326 heartbeating to localhost/127.0.0.1:33329): State: TIMED_WAITING Blocked count: 1305 Waited count: 1491 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@677edd20): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 41527): State: TIMED_WAITING Blocked count: 0 Waited count: 554 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 41527): State: TIMED_WAITING Blocked count: 0 Waited count: 555 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 41527): State: TIMED_WAITING Blocked count: 0 Waited count: 554 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 41527): State: TIMED_WAITING Blocked count: 0 Waited count: 554 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 41527): State: TIMED_WAITING Blocked count: 0 Waited count: 554 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data3)): State: TIMED_WAITING Blocked count: 31 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data1)): State: TIMED_WAITING Blocked count: 33 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 187 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data4)): State: TIMED_WAITING Blocked count: 25 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 188 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data2)): State: TIMED_WAITING Blocked count: 25 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 194 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data2/current/BP-747981782-172.17.0.2-1734226417326): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 197 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data4/current/BP-747981782-172.17.0.2-1734226417326): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 199 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data1/current/BP-747981782-172.17.0.2-1734226417326): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data3/current/BP-747981782-172.17.0.2-1734226417326): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 211 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 212 
(pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 217 (java.util.concurrent.ThreadPoolExecutor$Worker@1e372f9b[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 218 (java.util.concurrent.ThreadPoolExecutor$Worker@358d9b23[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 219 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 220 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data6)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 225 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data6/current/BP-747981782-172.17.0.2-1734226417326): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 226 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data5/current/BP-747981782-172.17.0.2-1734226417326): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 230 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 234 (java.util.concurrent.ThreadPoolExecutor$Worker@47e47bbb[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 235 
(FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 19 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtility.java:599) Thread 238 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 237 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:64248): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 236 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 56 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 240 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 277 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 241 (SyncThread:0): State: WAITING Blocked count: 29 Waited count: 675 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@76aed49e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 242 (ProcessThread(sid:0 cport:64248):): State: WAITING Blocked count: 1 Waited count: 786 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@51e4ff6a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 243 (RequestThrottler): State: WAITING Blocked count: 1 Waited count: 816 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@bff6515 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 244 (NIOWorkerThread-1): State: WAITING Blocked count: 6 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e007a2d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 2 Waited count: 398 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (RS-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 16 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (Time-limited test-SendThread(127.0.0.1:64248)): State: RUNNABLE Blocked count: 13 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 259 (Time-limited test-EventThread): State: WAITING Blocked count: 20 Waited count: 57 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@484eceed Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 260 (NIOWorkerThread-2): State: WAITING Blocked count: 4 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (NIOWorkerThread-3): State: WAITING Blocked count: 5 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (NIOWorkerThread-4): State: WAITING Blocked count: 2 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (zk-event-processor-pool-0): State: WAITING Blocked count: 34 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@54b25a4f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-5): State: WAITING Blocked count: 3 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-6): State: WAITING Blocked count: 9 Waited count: 133 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-7): State: WAITING Blocked count: 7 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-8): State: WAITING Blocked count: 4 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 
(NIOWorkerThread-9): State: WAITING Blocked count: 4 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-10): State: WAITING Blocked count: 7 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-11): State: WAITING Blocked count: 3 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-12): State: WAITING Blocked count: 4 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-13): State: WAITING Blocked count: 7 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-14): State: WAITING Blocked count: 4 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-15): State: WAITING Blocked count: 7 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-16): State: WAITING Blocked count: 1 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e0a4d56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40789): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@19ae83e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 277 
(RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40789): State: WAITING Blocked count: 137 Waited count: 550 Waiting on java.util.concurrent.Semaphore$NonfairSync@6fa86368 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40789): State: WAITING Blocked count: 168 Waited count: 594 Waiting on java.util.concurrent.Semaphore$NonfairSync@51aff1d6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 279 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40789): State: WAITING Blocked count: 65 Waited count: 6086 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1789b98e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 280 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40789): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7a55b92a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 281 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40789): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7a55b92a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 282 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=40789): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@56ca262b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=40789): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@78c54c65 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=40789): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@22dacf3a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) 
java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 285 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=40789): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@7591bea2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 289 (RS-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 311 (RS-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 333 (RS-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 113 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 287 (M:0;66d575e28c61:40789): State: TIMED_WAITING Blocked count: 6 Waited count: 2508 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.shutdown(AbstractFSWAL.java:1011) app//org.apache.hadoop.hbase.wal.AbstractFSWALProvider.shutdown(AbstractFSWALProvider.java:184) app//org.apache.hadoop.hbase.wal.WALFactory.shutdown(WALFactory.java:272) app//org.apache.hadoop.hbase.master.region.MasterRegion.shutdownWAL(MasterRegion.java:140) app//org.apache.hadoop.hbase.master.region.MasterRegion.close(MasterRegion.java:206) app//org.apache.hadoop.hbase.master.HMaster.stopServiceThreads(HMaster.java:1758) app//org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:1285) app//org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:603) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 356 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 56 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 358 (master/66d575e28c61:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 360 (master/66d575e28c61:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 362 (org.apache.hadoop.hdfs.PeerCache@2a374a6e): State: TIMED_WAITING Blocked count: 0 Waited count: 184 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 380 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 5494 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:179) Thread 397 (RS-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 83 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 398 (RS-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 104 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 411 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 68 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2cd291aa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 422 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 55 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 410 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 54905 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 431 (RS-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 19 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 432 
(RS-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 30 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 457 (RegionServerTracker-0): State: WAITING Blocked count: 8 Waited count: 11 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@58302821 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 475 (regionserver/66d575e28c61:0.procedureResultReporter): State: WAITING Blocked count: 12 Waited count: 25 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7e51ddb1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 478 (regionserver/66d575e28c61:0.procedureResultReporter): State: WAITING Blocked count: 16 Waited count: 33 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46b65c10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 483 (regionserver/66d575e28c61:0.procedureResultReporter): State: WAITING Blocked count: 13 Waited count: 27 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@78ab7a0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 519 (RS-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 530 (RS-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 532 (region-location-0): State: WAITING Blocked count: 8 Waited count: 15 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5ea2f650 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 559 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 54690 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 581 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 591 (RS-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 606 (region-location-1): State: WAITING Blocked count: 5 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5ea2f650 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 607 (region-location-2): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5ea2f650 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 608 (region-location-3): State: WAITING Blocked count: 2 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5ea2f650 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1015 (MutableQuantiles-0):
State: TIMED_WAITING
Blocked count: 0
Waited count: 361
Stack:
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1081 (RPCClient-NioEventLoopGroup-6-2):
State: RUNNABLE
Blocked count: 11
Waited count: 0
Stack:
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1118 (RS-EventLoopGroup-4-3):
State: RUNNABLE
Blocked count: 0
Waited count: 0
Stack:
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1120 (zk-permission-watcher-pool-0):
State: WAITING
Blocked count: 62
Waited count: 93
Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@829102f
Stack:
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1178 (RPCClient-NioEventLoopGroup-6-3):
State: RUNNABLE
Blocked count: 0
Waited count: 0
Stack:
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1179 (RPCClient-NioEventLoopGroup-6-4):
State: RUNNABLE
Blocked count: 0
Waited count: 0
Stack:
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1531 (Container metrics unregistration):
State: WAITING
Blocked count: 11
Waited count: 33
Waiting on java.util.TaskQueue@41f0670c
Stack:
java.base@17.0.11/java.lang.Object.wait(Native Method)
java.base@17.0.11/java.lang.Object.wait(Object.java:338)
java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537)
java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Thread 3260 (region-location-4):
State: WAITING
Blocked count: 3
Waited count: 5
Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5ea2f650
Stack:
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 4946 (RPCClient-NioEventLoopGroup-6-5):
State: RUNNABLE
Blocked count: 0
Waited count: 0
Stack:
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 4947 (RPCClient-NioEventLoopGroup-6-6):
State: RUNNABLE
Blocked count: 0
Waited count: 0
Stack:
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 4948 (RPCClient-NioEventLoopGroup-6-7):
State: RUNNABLE
Blocked count: 0
Waited count: 0
Stack:
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 8929 (AsyncFSWAL-1-hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/MasterData-prefix:66d575e28c61,40789,1734226421776):
State: WAITING
Blocked count: 0
Waited count: 2
Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1f632656
Stack:
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 8934 (Timer for 'JobHistoryServer' metrics system):
State: TIMED_WAITING
Blocked count: 0
Waited count: 25
Stack:
java.base@17.0.11/java.lang.Object.wait(Native Method)
java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Thread 8935 (WAL-Shutdown-0):
State: TIMED_WAITING
Blocked count: 0
Waited count: 2
Stack:
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.awaitTermination(ThreadPoolExecutor.java:1464)
app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doShutdown(AsyncFSWAL.java:793)
app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$2.call(AbstractFSWAL.java:995)
app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$2.call(AbstractFSWAL.java:990)
java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264)
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 8936 (Close-WAL-Writer-0):
State: TIMED_WAITING
Blocked count: 1
Waited count: 2
Stack:
java.base@17.0.11/java.lang.Thread.sleep(Native Method)
app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:166)
app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96)
app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.recoverLease(AsyncFSWAL.java:722)
app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.lambda$closeWriter$5(AsyncFSWAL.java:735)
app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL$$Lambda$1149/0x00007f4f41163d18.run(Unknown Source)
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
2024-12-15T01:42:57,199 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=1 on file=hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/MasterData/WALs/66d575e28c61,40789,1734226421776/66d575e28c61%2C40789%2C1734226421776.1734226423403 after 4001ms
java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.recoverLease(AsyncFSWAL.java:722) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.lambda$closeWriter$5(AsyncFSWAL.java:735) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-15T01:42:58,194 ERROR [WAL-Shutdown-0 {}] wal.AsyncFSWAL(794): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.async.wait.on.shutdown.seconds"
2024-12-15T01:42:58,195 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-12-15T01:42:58,195 INFO [M:0;66d575e28c61:40789 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down.
2024-12-15T01:42:58,196 INFO [M:0;66d575e28c61:40789 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:40789
2024-12-15T01:42:58,202 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33329/user/jenkins/test-data/94ad8fc7-f512-0e08-0d6d-616aaeaea21b/MasterData/WALs/66d575e28c61,40789,1734226421776/66d575e28c61%2C40789%2C1734226421776.1734226423403
java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.recoverLease(AsyncFSWAL.java:722) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.lambda$closeWriter$5(AsyncFSWAL.java:735) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 12 more
2024-12-15T01:42:58,271 DEBUG [M:0;66d575e28c61:40789 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/66d575e28c61,40789,1734226421776 already deleted, retry=false
2024-12-15T01:42:58,380 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-15T01:42:58,380 INFO [M:0;66d575e28c61:40789 {}] regionserver.HRegionServer(1307): Exiting; stopping=66d575e28c61,40789,1734226421776; zookeeper connection closed.
2024-12-15T01:42:58,380 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40789-0x1002752410c0000, quorum=127.0.0.1:64248, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-15T01:42:58,426 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@321bdc95{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-15T01:42:58,427 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@27188009{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-15T01:42:58,427 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-15T01:42:58,427 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4bfb305e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-15T01:42:58,427 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@689e564d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/hadoop.log.dir/,STOPPED}
2024-12-15T01:42:58,430 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-15T01:42:58,430 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-15T01:42:58,430 WARN [BP-747981782-172.17.0.2-1734226417326 heartbeating to localhost/127.0.0.1:33329 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-15T01:42:58,430 WARN [BP-747981782-172.17.0.2-1734226417326 heartbeating to localhost/127.0.0.1:33329 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-747981782-172.17.0.2-1734226417326 (Datanode Uuid db4c7c2f-79b5-4215-8052-98ca3651da77) service to localhost/127.0.0.1:33329
2024-12-15T01:42:58,433 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data5/current/BP-747981782-172.17.0.2-1734226417326 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-15T01:42:58,433 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data6/current/BP-747981782-172.17.0.2-1734226417326 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-15T01:42:58,434 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-15T01:42:58,436 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@8add7a8{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-15T01:42:58,436 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@423c535d{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-15T01:42:58,437 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-15T01:42:58,437 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@18863edc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-15T01:42:58,437 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@aead573{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/hadoop.log.dir/,STOPPED}
2024-12-15T01:42:58,438 WARN [BP-747981782-172.17.0.2-1734226417326 heartbeating to localhost/127.0.0.1:33329 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-15T01:42:58,438 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-15T01:42:58,438 WARN [BP-747981782-172.17.0.2-1734226417326 heartbeating to localhost/127.0.0.1:33329 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-747981782-172.17.0.2-1734226417326 (Datanode Uuid 84f809e7-5568-47ff-ae29-cccdf581a3d9) service to localhost/127.0.0.1:33329
2024-12-15T01:42:58,438 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-15T01:42:58,439 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data3/current/BP-747981782-172.17.0.2-1734226417326 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-15T01:42:58,439 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data4/current/BP-747981782-172.17.0.2-1734226417326 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-15T01:42:58,439 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-15T01:42:58,441 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5fab760a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-15T01:42:58,441 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@120fd64b{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-15T01:42:58,441 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-15T01:42:58,441 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1b69292c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-15T01:42:58,441 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@57ea78fb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/hadoop.log.dir/,STOPPED}
2024-12-15T01:42:58,442 WARN [BP-747981782-172.17.0.2-1734226417326 heartbeating to localhost/127.0.0.1:33329 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-15T01:42:58,442 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-15T01:42:58,442 WARN [BP-747981782-172.17.0.2-1734226417326 heartbeating to localhost/127.0.0.1:33329 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-747981782-172.17.0.2-1734226417326 (Datanode Uuid dc094a0d-88b6-4c61-9536-9650187be222) service to localhost/127.0.0.1:33329
2024-12-15T01:42:58,442 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-15T01:42:58,442 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data1/current/BP-747981782-172.17.0.2-1734226417326 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-15T01:42:58,443 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/cluster_6c631f5c-168f-a2c7-a0a3-731c46bded10/dfs/data/data2/current/BP-747981782-172.17.0.2-1734226417326 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-15T01:42:58,443 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-15T01:42:58,448 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@275c2418{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-15T01:42:58,449 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2847eba1{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-15T01:42:58,449 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-15T01:42:58,449 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4ad95383{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-15T01:42:58,449 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@304604cb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/5271baa4-78a5-b1e4-f262-28664f33e5d8/hadoop.log.dir/,STOPPED}
2024-12-15T01:42:58,460 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers
2024-12-15T01:42:58,684 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down