2024-11-22 06:01:53,161 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@790da477 2024-11-22 06:01:53,201 main DEBUG Took 0.025321 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-22 06:01:53,202 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-22 06:01:53,202 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-22 06:01:53,204 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-22 06:01:53,206 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-22 06:01:53,237 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-22 06:01:53,258 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 06:01:53,260 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-22 06:01:53,261 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 06:01:53,261 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-22 06:01:53,262 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 06:01:53,262 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-22 06:01:53,264 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 06:01:53,264 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-22 06:01:53,265 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 06:01:53,265 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-22 06:01:53,266 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 06:01:53,267 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-22 06:01:53,268 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 06:01:53,268 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-22 06:01:53,269 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 06:01:53,269 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-22 06:01:53,270 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 06:01:53,270 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-22 06:01:53,271 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 06:01:53,271 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-22 06:01:53,272 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 06:01:53,272 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-22 06:01:53,273 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 06:01:53,273 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-22 06:01:53,274 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 06:01:53,275 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-22 06:01:53,283 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 06:01:53,291 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-22 06:01:53,294 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-22 06:01:53,301 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
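The records up to this point show Log4j 2 building one LoggerConfig per package from the log4j2.properties bundled in the hbase-logging tests jar, e.g. org.apache.hadoop.hbase at DEBUG, org.apache.zookeeper at ERROR, and a root logger bound to the Console appender at INFO (the appender and PatternLayout records follow a little further down). For readers who want to reproduce or adjust this verbosity when rerunning the test locally, the sketch below sets the same levels programmatically with the standard log4j-core Configurator; it is a minimal illustration, not part of the HBase test setup itself.

```java
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.core.config.Configurator;

// Minimal sketch: mirror a few of the logger levels seen in the
// PropertiesConfiguration records above. Adjust as needed when rerunning locally.
public final class TestLogLevels {
  public static void main(String[] args) {
    Configurator.setRootLevel(Level.INFO);                                        // root = INFO,Console
    Configurator.setLevel("org.apache.hadoop", Level.WARN);
    Configurator.setLevel("org.apache.hadoop.hbase", Level.DEBUG);
    Configurator.setLevel("org.apache.zookeeper", Level.ERROR);
    Configurator.setLevel("org.apache.hadoop.metrics2.impl.MetricsSystemImpl", Level.ERROR);
  }
}
```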
2024-11-22 06:01:53,304 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-22 06:01:53,304 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-22 06:01:53,332 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-22 06:01:53,342 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-22 06:01:53,357 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-22 06:01:53,359 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-22 06:01:53,375 main DEBUG createAppenders(={Console}) 2024-11-22 06:01:53,377 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@790da477 initialized 2024-11-22 06:01:53,378 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@790da477 2024-11-22 06:01:53,379 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@790da477 OK. 2024-11-22 06:01:53,380 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-22 06:01:53,381 main DEBUG OutputStream closed 2024-11-22 06:01:53,383 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-22 06:01:53,383 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-22 06:01:53,384 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@5a56cdac OK 2024-11-22 06:01:53,537 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-22 06:01:53,540 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-22 06:01:53,542 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-22 06:01:53,543 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-22 06:01:53,544 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-22 06:01:53,545 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-22 06:01:53,545 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-22 06:01:53,546 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-22 06:01:53,546 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-22 06:01:53,547 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-22 06:01:53,547 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-22 06:01:53,547 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-22 06:01:53,548 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-22 06:01:53,548 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-22 06:01:53,548 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-22 06:01:53,549 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-22 06:01:53,549 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-22 06:01:53,550 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-22 06:01:53,553 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-22 06:01:53,554 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6f63b475) with optional ClassLoader: null 2024-11-22 06:01:53,554 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-22 06:01:53,555 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6f63b475] started OK. 2024-11-22T06:01:53,573 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestExportSnapshot timeout: 13 mins 2024-11-22 06:01:53,576 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-22 06:01:53,577 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-22T06:01:54,014 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be 2024-11-22T06:01:54,015 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestSecureExportSnapshot timeout: 13 mins 2024-11-22T06:01:54,123 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... 
using builtin-java classes where applicable 2024-11-22T06:01:54,568 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-22T06:01:54,596 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48, deleteOnExit=true 2024-11-22T06:01:54,596 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-22T06:01:54,598 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/test.cache.data in system properties and HBase conf 2024-11-22T06:01:54,599 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/hadoop.tmp.dir in system properties and HBase conf 2024-11-22T06:01:54,600 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/hadoop.log.dir in system properties and HBase conf 2024-11-22T06:01:54,601 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-22T06:01:54,602 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-22T06:01:54,603 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-22T06:01:54,734 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-22T06:01:54,739 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-22T06:01:54,740 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-22T06:01:54,740 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-22T06:01:54,741 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-22T06:01:54,741 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-22T06:01:54,742 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-22T06:01:54,743 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-22T06:01:54,743 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-22T06:01:54,743 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-22T06:01:54,744 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/nfs.dump.dir in system properties and HBase conf 2024-11-22T06:01:54,744 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/java.io.tmpdir in system properties and HBase conf 2024-11-22T06:01:54,745 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-22T06:01:54,745 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-22T06:01:54,745 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-22T06:01:56,196 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-22T06:01:56,307 INFO [Time-limited test {}] log.Log(170): Logging initialized @4198ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-22T06:01:56,395 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T06:01:56,499 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T06:01:56,545 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T06:01:56,545 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T06:01:56,548 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-22T06:01:56,591 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T06:01:56,596 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@74e30e0c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/hadoop.log.dir/,AVAILABLE} 2024-11-22T06:01:56,597 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3d167fe8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T06:01:56,868 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@a55f3e1{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/java.io.tmpdir/jetty-localhost-41781-hadoop-hdfs-3_4_1-tests_jar-_-any-5967395673012392725/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-22T06:01:56,887 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@797ff5a9{HTTP/1.1, (http/1.1)}{localhost:41781} 2024-11-22T06:01:56,887 INFO [Time-limited test {}] server.Server(415): Started @4779ms 2024-11-22T06:01:57,480 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T06:01:57,489 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T06:01:57,492 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T06:01:57,492 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T06:01:57,492 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T06:01:57,493 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@18d9bb98{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/hadoop.log.dir/,AVAILABLE} 2024-11-22T06:01:57,494 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@134642c1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T06:01:57,609 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4177c147{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/java.io.tmpdir/jetty-localhost-41047-hadoop-hdfs-3_4_1-tests_jar-_-any-12384675819551660046/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T06:01:57,610 INFO [Time-limited 
test {}] server.AbstractConnector(333): Started ServerConnector@f6e660c{HTTP/1.1, (http/1.1)}{localhost:41047} 2024-11-22T06:01:57,611 INFO [Time-limited test {}] server.Server(415): Started @5503ms 2024-11-22T06:01:57,685 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T06:01:57,882 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T06:01:57,908 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T06:01:57,910 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T06:01:57,910 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T06:01:57,911 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-22T06:01:57,912 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1f60738e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/hadoop.log.dir/,AVAILABLE} 2024-11-22T06:01:57,913 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1ed86ab3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T06:01:58,061 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7193640b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/java.io.tmpdir/jetty-localhost-37893-hadoop-hdfs-3_4_1-tests_jar-_-any-17809483620371409251/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T06:01:58,062 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7112b791{HTTP/1.1, (http/1.1)}{localhost:37893} 2024-11-22T06:01:58,063 INFO [Time-limited test {}] server.Server(415): Started @5955ms 2024-11-22T06:01:58,069 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T06:01:58,134 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T06:01:58,142 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T06:01:58,152 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T06:01:58,152 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T06:01:58,152 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T06:01:58,157 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4f667f57{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/hadoop.log.dir/,AVAILABLE} 2024-11-22T06:01:58,158 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7e41eadb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T06:01:58,284 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@174d7797{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/java.io.tmpdir/jetty-localhost-42407-hadoop-hdfs-3_4_1-tests_jar-_-any-8265497038746245879/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T06:01:58,285 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5ed153c3{HTTP/1.1, (http/1.1)}{localhost:42407} 2024-11-22T06:01:58,285 INFO [Time-limited test {}] server.Server(415): Started @6177ms 2024-11-22T06:01:58,287 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T06:01:59,356 WARN [Thread-121 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data2/current/BP-1246063832-172.17.0.2-1732255315577/current, will proceed with Du for space computation calculation, 2024-11-22T06:01:59,356 WARN [Thread-120 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data1/current/BP-1246063832-172.17.0.2-1732255315577/current, will proceed with Du for space computation calculation, 2024-11-22T06:01:59,387 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-22T06:01:59,422 WARN [Thread-131 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data3/current/BP-1246063832-172.17.0.2-1732255315577/current, will proceed with Du for space computation calculation, 2024-11-22T06:01:59,423 WARN [Thread-132 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data4/current/BP-1246063832-172.17.0.2-1732255315577/current, will proceed with Du for space computation calculation, 2024-11-22T06:01:59,437 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9aec1042ffc312cd with lease ID 0xc9871ddc340d0b43: Processing first storage report for DS-08cd23bf-0e4d-49b5-a7a5-1207b139cd06 from datanode DatanodeRegistration(127.0.0.1:35893, datanodeUuid=5348112f-8cdf-4055-aedf-c9929e8181eb, infoPort=40289, infoSecurePort=0, ipcPort=44493, storageInfo=lv=-57;cid=testClusterID;nsid=1786309834;c=1732255315577) 2024-11-22T06:01:59,438 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9aec1042ffc312cd with lease ID 0xc9871ddc340d0b43: from storage DS-08cd23bf-0e4d-49b5-a7a5-1207b139cd06 node DatanodeRegistration(127.0.0.1:35893, datanodeUuid=5348112f-8cdf-4055-aedf-c9929e8181eb, infoPort=40289, infoSecurePort=0, ipcPort=44493, storageInfo=lv=-57;cid=testClusterID;nsid=1786309834;c=1732255315577), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-22T06:01:59,439 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9aec1042ffc312cd with lease ID 0xc9871ddc340d0b43: Processing first storage report for DS-0bddcc08-2c72-40df-9514-e7234b40ed75 from datanode DatanodeRegistration(127.0.0.1:35893, datanodeUuid=5348112f-8cdf-4055-aedf-c9929e8181eb, infoPort=40289, infoSecurePort=0, ipcPort=44493, storageInfo=lv=-57;cid=testClusterID;nsid=1786309834;c=1732255315577) 2024-11-22T06:01:59,439 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9aec1042ffc312cd with lease ID 0xc9871ddc340d0b43: from storage DS-0bddcc08-2c72-40df-9514-e7234b40ed75 node DatanodeRegistration(127.0.0.1:35893, datanodeUuid=5348112f-8cdf-4055-aedf-c9929e8181eb, infoPort=40289, infoSecurePort=0, ipcPort=44493, storageInfo=lv=-57;cid=testClusterID;nsid=1786309834;c=1732255315577), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T06:01:59,458 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-22T06:01:59,465 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2b40cd22920b3c76 with lease ID 0xc9871ddc340d0b44: Processing first storage report for DS-0b5581d3-dd67-41a8-861e-1dad663ec757 from datanode DatanodeRegistration(127.0.0.1:43345, datanodeUuid=5eafc69a-8a8f-4daa-847b-e5a5550eff99, infoPort=45629, infoSecurePort=0, ipcPort=34425, storageInfo=lv=-57;cid=testClusterID;nsid=1786309834;c=1732255315577) 2024-11-22T06:01:59,465 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2b40cd22920b3c76 with lease ID 0xc9871ddc340d0b44: from storage DS-0b5581d3-dd67-41a8-861e-1dad663ec757 node DatanodeRegistration(127.0.0.1:43345, datanodeUuid=5eafc69a-8a8f-4daa-847b-e5a5550eff99, infoPort=45629, infoSecurePort=0, ipcPort=34425, storageInfo=lv=-57;cid=testClusterID;nsid=1786309834;c=1732255315577), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T06:01:59,466 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2b40cd22920b3c76 with lease ID 0xc9871ddc340d0b44: Processing first storage report for DS-d99b2c4b-58fd-495f-a59a-e48daca0bf6b from datanode DatanodeRegistration(127.0.0.1:43345, datanodeUuid=5eafc69a-8a8f-4daa-847b-e5a5550eff99, infoPort=45629, infoSecurePort=0, ipcPort=34425, storageInfo=lv=-57;cid=testClusterID;nsid=1786309834;c=1732255315577) 2024-11-22T06:01:59,466 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2b40cd22920b3c76 with lease ID 0xc9871ddc340d0b44: from storage DS-d99b2c4b-58fd-495f-a59a-e48daca0bf6b node DatanodeRegistration(127.0.0.1:43345, datanodeUuid=5eafc69a-8a8f-4daa-847b-e5a5550eff99, infoPort=45629, infoSecurePort=0, ipcPort=34425, storageInfo=lv=-57;cid=testClusterID;nsid=1786309834;c=1732255315577), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T06:01:59,574 WARN [Thread-142 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data5/current/BP-1246063832-172.17.0.2-1732255315577/current, will proceed with Du for space computation calculation, 2024-11-22T06:01:59,576 WARN [Thread-143 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data6/current/BP-1246063832-172.17.0.2-1732255315577/current, will proceed with Du for space computation calculation, 2024-11-22T06:01:59,618 WARN [Thread-103 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-22T06:01:59,629 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x729ea82cb48ae776 with lease ID 0xc9871ddc340d0b45: Processing first storage report for DS-0c52a8d8-8d32-460f-a6ef-8426314f1096 from datanode DatanodeRegistration(127.0.0.1:43209, datanodeUuid=7b2f6a5a-1bea-4654-98f0-f374a7ca4605, infoPort=35589, infoSecurePort=0, ipcPort=38763, storageInfo=lv=-57;cid=testClusterID;nsid=1786309834;c=1732255315577) 2024-11-22T06:01:59,629 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x729ea82cb48ae776 with lease ID 0xc9871ddc340d0b45: from storage DS-0c52a8d8-8d32-460f-a6ef-8426314f1096 node DatanodeRegistration(127.0.0.1:43209, datanodeUuid=7b2f6a5a-1bea-4654-98f0-f374a7ca4605, infoPort=35589, infoSecurePort=0, ipcPort=38763, storageInfo=lv=-57;cid=testClusterID;nsid=1786309834;c=1732255315577), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T06:01:59,630 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x729ea82cb48ae776 with lease ID 0xc9871ddc340d0b45: Processing first storage report for DS-f45882be-6732-4cf3-9793-11db8ff216d4 from datanode DatanodeRegistration(127.0.0.1:43209, datanodeUuid=7b2f6a5a-1bea-4654-98f0-f374a7ca4605, infoPort=35589, infoSecurePort=0, ipcPort=38763, storageInfo=lv=-57;cid=testClusterID;nsid=1786309834;c=1732255315577) 2024-11-22T06:01:59,630 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x729ea82cb48ae776 with lease ID 0xc9871ddc340d0b45: from storage DS-f45882be-6732-4cf3-9793-11db8ff216d4 node DatanodeRegistration(127.0.0.1:43209, datanodeUuid=7b2f6a5a-1bea-4654-98f0-f374a7ca4605, infoPort=35589, infoSecurePort=0, ipcPort=38763, storageInfo=lv=-57;cid=testClusterID;nsid=1786309834;c=1732255315577), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T06:01:59,722 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be 2024-11-22T06:01:59,798 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/zookeeper_0, clientPort=59716, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-22T06:01:59,811 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=59716 2024-11-22T06:01:59,822 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 
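By this point the log shows the HDFS mini-cluster (three DataNodes reporting their storages) and a MiniZooKeeperCluster on client port 59716 up and running, as requested earlier by StartMiniClusterOption{numMasters=1, numRegionServers=3, numDataNodes=3, numZkServers=1}. As a rough orientation, the sketch below shows how a test typically asks for such a cluster. It uses the HBase 2.x HBaseTestingUtility/StartMiniClusterOption API, which I am assuming mirrors the newer HBaseTestingUtil seen in this 4.0.0-alpha log; method names may differ slightly between the two utilities.

```java
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.StartMiniClusterOption;

// Sketch only: request a mini-cluster shaped like the one started above.
public final class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)          // matches StartMiniClusterOption{numMasters=1, ...}
        .numRegionServers(3)
        .numDataNodes(3)
        .numZkServers(1)
        .build();
    util.startMiniCluster(option);   // brings up HDFS, ZooKeeper and HBase as in the log
    try {
      // test body would go here
    } finally {
      util.shutdownMiniCluster();
    }
  }
}
```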
2024-11-22T06:01:59,825 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T06:02:00,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741825_1001 (size=7) 2024-11-22T06:02:00,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741825_1001 (size=7) 2024-11-22T06:02:00,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741825_1001 (size=7) 2024-11-22T06:02:00,489 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788 with version=8 2024-11-22T06:02:00,489 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/hbase-staging 2024-11-22T06:02:00,579 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-22T06:02:00,845 INFO [Time-limited test {}] client.ConnectionUtils(128): master/407411ea95db:0 server-side Connection retries=45 2024-11-22T06:02:00,856 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T06:02:00,857 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T06:02:00,864 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T06:02:00,864 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T06:02:00,865 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T06:02:01,049 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-22T06:02:01,131 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-22T06:02:01,144 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-22T06:02:01,148 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T06:02:01,174 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 107376 (auto-detected) 2024-11-22T06:02:01,175 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-22T06:02:01,194 INFO [Time-limited test {}] 
ipc.NettyRpcServer(191): Bind to /172.17.0.2:40429 2024-11-22T06:02:01,216 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:40429 connecting to ZooKeeper ensemble=127.0.0.1:59716 2024-11-22T06:02:01,329 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:404290x0, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-22T06:02:01,336 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:40429-0x101626c19a90000 connected 2024-11-22T06:02:01,414 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T06:02:01,419 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T06:02:01,441 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T06:02:01,447 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788, hbase.cluster.distributed=false 2024-11-22T06:02:01,495 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T06:02:01,503 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40429 2024-11-22T06:02:01,504 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40429 2024-11-22T06:02:01,510 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40429 2024-11-22T06:02:01,511 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40429 2024-11-22T06:02:01,512 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40429 2024-11-22T06:02:01,610 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/407411ea95db:0 server-side Connection retries=45 2024-11-22T06:02:01,612 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T06:02:01,613 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T06:02:01,613 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T06:02:01,613 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T06:02:01,613 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated 
metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T06:02:01,617 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-22T06:02:01,620 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T06:02:01,621 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44619 2024-11-22T06:02:01,624 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44619 connecting to ZooKeeper ensemble=127.0.0.1:59716 2024-11-22T06:02:01,625 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T06:02:01,630 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T06:02:01,662 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:446190x0, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-22T06:02:01,663 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:446190x0, quorum=127.0.0.1:59716, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T06:02:01,663 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44619-0x101626c19a90001 connected 2024-11-22T06:02:01,668 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-22T06:02:01,678 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-22T06:02:01,681 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44619-0x101626c19a90001, quorum=127.0.0.1:59716, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-22T06:02:01,690 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44619-0x101626c19a90001, quorum=127.0.0.1:59716, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T06:02:01,691 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44619 2024-11-22T06:02:01,691 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44619 2024-11-22T06:02:01,692 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44619 2024-11-22T06:02:01,693 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44619 2024-11-22T06:02:01,693 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44619 2024-11-22T06:02:01,718 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/407411ea95db:0 server-side Connection retries=45 
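Each server in the log builds the same set of RPC executors (default, priority read/write, replication, metaPriority) with handlerCount=3 and maxQueueLength=30 before binding a NettyRpcServer to its port. These sizes are normally driven by configuration; the sketch below lists the standard keys that control them, under the assumption that the test harness tunes them in the usual way. It is illustrative only, not a copy of the test's actual settings.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Illustrative only: standard knobs behind the RpcExecutor numbers in the log.
public final class RpcTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.regionserver.handler.count", 3);           // handlerCount=3 above
    conf.setInt("hbase.ipc.server.max.callqueue.length", 30);     // maxQueueLength=30 above
    conf.setFloat("hbase.ipc.server.callqueue.read.ratio", 0.5f); // split read/write queues
    // pass conf to the testing utility / cluster before startup
  }
}
```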
2024-11-22T06:02:01,719 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T06:02:01,719 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T06:02:01,719 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T06:02:01,720 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T06:02:01,720 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T06:02:01,720 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-22T06:02:01,720 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T06:02:01,721 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41799 2024-11-22T06:02:01,724 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41799 connecting to ZooKeeper ensemble=127.0.0.1:59716 2024-11-22T06:02:01,725 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T06:02:01,728 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T06:02:01,745 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:417990x0, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-22T06:02:01,747 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41799-0x101626c19a90002 connected 2024-11-22T06:02:01,747 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41799-0x101626c19a90002, quorum=127.0.0.1:59716, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T06:02:01,747 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-22T06:02:01,750 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-22T06:02:01,752 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41799-0x101626c19a90002, quorum=127.0.0.1:59716, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-22T06:02:01,754 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41799-0x101626c19a90002, quorum=127.0.0.1:59716, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T06:02:01,758 DEBUG [Time-limited test 
{}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41799 2024-11-22T06:02:01,759 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41799 2024-11-22T06:02:01,759 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41799 2024-11-22T06:02:01,761 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41799 2024-11-22T06:02:01,762 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41799 2024-11-22T06:02:01,785 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/407411ea95db:0 server-side Connection retries=45 2024-11-22T06:02:01,785 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T06:02:01,785 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T06:02:01,785 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T06:02:01,786 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T06:02:01,786 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T06:02:01,786 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-22T06:02:01,786 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T06:02:01,787 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35283 2024-11-22T06:02:01,790 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35283 connecting to ZooKeeper ensemble=127.0.0.1:59716 2024-11-22T06:02:01,791 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T06:02:01,794 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T06:02:01,803 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:352830x0, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-22T06:02:01,804 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:352830x0, quorum=127.0.0.1:59716, baseZNode=/hbase Set watcher on znode that does not yet exist, 
/hbase/running 2024-11-22T06:02:01,805 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-22T06:02:01,805 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35283-0x101626c19a90003 connected 2024-11-22T06:02:01,811 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-22T06:02:01,812 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35283-0x101626c19a90003, quorum=127.0.0.1:59716, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-22T06:02:01,814 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35283-0x101626c19a90003, quorum=127.0.0.1:59716, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T06:02:01,819 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35283 2024-11-22T06:02:01,820 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35283 2024-11-22T06:02:01,823 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35283 2024-11-22T06:02:01,824 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35283 2024-11-22T06:02:01,826 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35283 2024-11-22T06:02:01,841 DEBUG [M:0;407411ea95db:40429 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;407411ea95db:40429 2024-11-22T06:02:01,843 INFO [master/407411ea95db:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/407411ea95db,40429,1732255320691 2024-11-22T06:02:01,854 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T06:02:01,854 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41799-0x101626c19a90002, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T06:02:01,854 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35283-0x101626c19a90003, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T06:02:01,854 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44619-0x101626c19a90001, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T06:02:01,856 DEBUG [master/407411ea95db:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/407411ea95db,40429,1732255320691 2024-11-22T06:02:01,887 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41799-0x101626c19a90002, quorum=127.0.0.1:59716, baseZNode=/hbase Received 
ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-22T06:02:01,887 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44619-0x101626c19a90001, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-22T06:02:01,887 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35283-0x101626c19a90003, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-22T06:02:01,887 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41799-0x101626c19a90002, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T06:02:01,887 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44619-0x101626c19a90001, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T06:02:01,887 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35283-0x101626c19a90003, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T06:02:01,887 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T06:02:01,888 DEBUG [master/407411ea95db:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-22T06:02:01,890 INFO [master/407411ea95db:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/407411ea95db,40429,1732255320691 from backup master directory 2024-11-22T06:02:01,904 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35283-0x101626c19a90003, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T06:02:01,904 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/407411ea95db,40429,1732255320691 2024-11-22T06:02:01,904 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T06:02:01,905 WARN [master/407411ea95db:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
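The ZKUtil lines above repeatedly report "Set watcher on znode that does not yet exist". That is the usual ZooKeeper pattern of arming a watch through an exists() call, which registers the watch whether or not the path is present yet. A minimal sketch with the plain ZooKeeper client (the quorum address mirrors the one in the log; the class name and printouts are illustrative, not HBase code):

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class ZnodeWatchSketch {
        public static void main(String[] args) throws Exception {
            // Connect to the quorum used by the mini cluster (address is illustrative).
            ZooKeeper zk = new ZooKeeper("127.0.0.1:59716", 30_000,
                (WatchedEvent e) -> System.out.println("session event: " + e));
            // exists() arms a one-shot watch even when the znode is absent, which is
            // why the log can say "Set watcher on znode that does not yet exist".
            Watcher masterWatch = e -> System.out.println("master znode changed: " + e.getType());
            if (zk.exists("/hbase/master", masterWatch) == null) {
                System.out.println("/hbase/master not created yet; watch is still armed");
            }
            zk.close();
        }
    }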
2024-11-22T06:02:01,905 INFO [master/407411ea95db:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=407411ea95db,40429,1732255320691 2024-11-22T06:02:01,906 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41799-0x101626c19a90002, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T06:02:01,906 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44619-0x101626c19a90001, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T06:02:01,908 INFO [master/407411ea95db:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-22T06:02:01,910 INFO [master/407411ea95db:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-22T06:02:01,971 DEBUG [master/407411ea95db:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/hbase.id] with ID: 6eeb2921-6f20-4de7-bcc0-81ebecd4007b 2024-11-22T06:02:01,971 DEBUG [master/407411ea95db:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.tmp/hbase.id 2024-11-22T06:02:01,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741826_1002 (size=42) 2024-11-22T06:02:01,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741826_1002 (size=42) 2024-11-22T06:02:01,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741826_1002 (size=42) 2024-11-22T06:02:01,999 DEBUG [master/407411ea95db:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.tmp/hbase.id]:[hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/hbase.id] 2024-11-22T06:02:02,053 INFO [master/407411ea95db:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T06:02:02,059 INFO [master/407411ea95db:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-22T06:02:02,079 INFO [master/407411ea95db:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 18ms. 
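The FSUtils lines above write the cluster ID to a temporary path and only then move it to its final name, so readers never observe a half-written hbase.id. A minimal sketch of that write-then-rename idiom with the Hadoop FileSystem API (paths and the class name are illustrative):

    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdWriteSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            FileSystem fs = FileSystem.get(conf);
            Path tmp = new Path("/hbase/.tmp/hbase.id");   // staging location (illustrative)
            Path target = new Path("/hbase/hbase.id");     // final location (illustrative)
            try (FSDataOutputStream out = fs.create(tmp, true)) {
                out.write("6eeb2921-6f20-4de7-bcc0-81ebecd4007b".getBytes(StandardCharsets.UTF_8));
            }
            // The rename publishes the file only after it is fully written.
            if (!fs.rename(tmp, target)) {
                throw new java.io.IOException("rename failed: " + tmp + " -> " + target);
            }
        }
    }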
2024-11-22T06:02:02,095 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44619-0x101626c19a90001, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T06:02:02,095 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T06:02:02,095 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41799-0x101626c19a90002, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T06:02:02,095 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35283-0x101626c19a90003, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T06:02:02,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741827_1003 (size=196) 2024-11-22T06:02:02,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741827_1003 (size=196) 2024-11-22T06:02:02,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741827_1003 (size=196) 2024-11-22T06:02:02,137 INFO [master/407411ea95db:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T06:02:02,140 INFO [master/407411ea95db:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-22T06:02:02,159 DEBUG [master/407411ea95db:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: 
org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo)
    at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at java.lang.Class.forName0(Native Method) ~[?:?]
    at java.lang.Class.forName(Class.java:375) ~[?:?]
    at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:150) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:174) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:262) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:231) ~[classes/:?]
    at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:400) ~[classes/:?]
    at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:1003) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2535) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:613) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.lambda$tracedRunnable$2(TraceUtil.java:155) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
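The NoSuchMethodException above is expected on Hadoop versions without HDFS-12396: the helper probes for the method reflectively and falls back when it is absent, so the DEBUG trace is informational rather than an error. A minimal sketch of that capability-probe pattern in plain Java (class name and printouts are illustrative; this is not the HBase implementation):

    import java.lang.reflect.Method;

    public class HdfsCapabilityProbeSketch {
        public static void main(String[] args) {
            try {
                Class<?> dfsClient = Class.forName("org.apache.hadoop.hdfs.DFSClient");
                Method m = dfsClient.getDeclaredMethod(
                    "decryptEncryptedDataEncryptionKey",
                    Class.forName("org.apache.hadoop.fs.FileEncryptionInfo"));
                System.out.println("HDFS-12396 style method present: " + m);
            } catch (ClassNotFoundException | NoSuchMethodException e) {
                // Same situation as in the log: the probe fails and the caller
                // switches to an alternative crypto helper.
                System.out.println("method not found, using fallback: " + e);
            }
        }
    }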
2024-11-22T06:02:02,165 INFO [master/407411ea95db:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-22T06:02:02,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741828_1004 (size=1189) 2024-11-22T06:02:02,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741828_1004 (size=1189) 2024-11-22T06:02:02,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741828_1004 (size=1189) 2024-11-22T06:02:02,238 INFO [master/407411ea95db:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/MasterData/data/master/store 2024-11-22T06:02:02,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741829_1005 (size=34) 2024-11-22T06:02:02,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741829_1005 (size=34) 2024-11-22T06:02:02,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741829_1005 (size=34) 2024-11-22T06:02:02,274 INFO [master/407411ea95db:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 
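The StoreHotnessProtector message above spells out how to enable the feature. A hedged sketch that sets the quoted key programmatically (the value 10 is an arbitrary example, not a recommendation):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class HotnessProtectorConfigSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Key taken from the log message above; any value > 0 enables the protector.
            conf.setInt("hbase.region.store.parallel.put.limit", 10);
            System.out.println("parallel put limit = "
                + conf.getInt("hbase.region.store.parallel.put.limit", 0));
        }
    }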
2024-11-22T06:02:02,277 DEBUG [master/407411ea95db:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T06:02:02,279 DEBUG [master/407411ea95db:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-22T06:02:02,279 INFO [master/407411ea95db:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T06:02:02,279 DEBUG [master/407411ea95db:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T06:02:02,281 DEBUG [master/407411ea95db:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-22T06:02:02,281 DEBUG [master/407411ea95db:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T06:02:02,281 INFO [master/407411ea95db:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T06:02:02,282 DEBUG [master/407411ea95db:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732255322279Disabling compacts and flushes for region at 1732255322279Disabling writes for close at 1732255322281 (+2 ms)Writing region close event to WAL at 1732255322281Closed at 1732255322281 2024-11-22T06:02:02,284 WARN [master/407411ea95db:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/MasterData/data/master/store/.initializing 2024-11-22T06:02:02,284 DEBUG [master/407411ea95db:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/MasterData/WALs/407411ea95db,40429,1732255320691 2024-11-22T06:02:02,291 INFO [master/407411ea95db:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-22T06:02:02,306 INFO [master/407411ea95db:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=407411ea95db%2C40429%2C1732255320691, suffix=, logDir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/MasterData/WALs/407411ea95db,40429,1732255320691, archiveDir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/MasterData/oldWALs, maxLogs=10 2024-11-22T06:02:02,335 DEBUG [master/407411ea95db:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/MasterData/WALs/407411ea95db,40429,1732255320691/407411ea95db%2C40429%2C1732255320691.1732255322313, exclude list is [], retry=0 2024-11-22T06:02:02,354 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43345,DS-0b5581d3-dd67-41a8-861e-1dad663ec757,DISK] 
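The AbstractFSWAL line above reports blocksize=256 MB and rollsize=128 MB, i.e. the roll size is half the block size. Assuming the commonly documented sizing keys apply to this WAL (defaults and key applicability vary by version, so treat the key names and fallback values here as assumptions), a sketch of how the roll size is derived:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalSizingSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Roll size is block size * multiplier; 256 MB * 0.5 matches the
            // 256 MB / 128 MB pair reported in the log above.
            long blockSize = conf.getLong("hbase.regionserver.hlog.blocksize",
                256L * 1024 * 1024);
            float multiplier = conf.getFloat("hbase.regionserver.logroll.multiplier", 0.5f);
            System.out.println("roll size = " + (long) (blockSize * multiplier));
        }
    }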
2024-11-22T06:02:02,354 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43209,DS-0c52a8d8-8d32-460f-a6ef-8426314f1096,DISK] 2024-11-22T06:02:02,354 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35893,DS-08cd23bf-0e4d-49b5-a7a5-1207b139cd06,DISK] 2024-11-22T06:02:02,357 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-11-22T06:02:02,392 INFO [master/407411ea95db:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/MasterData/WALs/407411ea95db,40429,1732255320691/407411ea95db%2C40429%2C1732255320691.1732255322313 2024-11-22T06:02:02,393 DEBUG [master/407411ea95db:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40289:40289),(127.0.0.1/127.0.0.1:45629:45629),(127.0.0.1/127.0.0.1:35589:35589)] 2024-11-22T06:02:02,393 DEBUG [master/407411ea95db:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-22T06:02:02,393 DEBUG [master/407411ea95db:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T06:02:02,396 DEBUG [master/407411ea95db:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T06:02:02,397 DEBUG [master/407411ea95db:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T06:02:02,432 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T06:02:02,456 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-22T06:02:02,460 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T06:02:02,463 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T06:02:02,463 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T06:02:02,466 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-22T06:02:02,467 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T06:02:02,468 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T06:02:02,468 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T06:02:02,471 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-22T06:02:02,471 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T06:02:02,472 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 
{}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T06:02:02,472 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T06:02:02,475 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-22T06:02:02,475 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T06:02:02,476 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T06:02:02,477 DEBUG [master/407411ea95db:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T06:02:02,480 DEBUG [master/407411ea95db:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-22T06:02:02,481 DEBUG [master/407411ea95db:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-22T06:02:02,487 DEBUG [master/407411ea95db:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T06:02:02,487 DEBUG [master/407411ea95db:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T06:02:02,491 DEBUG [master/407411ea95db:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
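The FlushLargeStoresPolicy line above explains its fallback: because hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the table descriptor, it divides the memstore flush size by the number of column families. A sketch of setting that bound explicitly on a table descriptor (table name, family, and the 16 MB value are illustrative):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class FlushLowerBoundSketch {
        public static void main(String[] args) {
            // Key name is the one quoted in the log; 16 MB is an arbitrary example.
            TableDescriptor td = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("demo"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
                .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                    String.valueOf(16L * 1024 * 1024))
                .build();
            System.out.println(td.getValue("hbase.hregion.percolumnfamilyflush.size.lower.bound"));
        }
    }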
2024-11-22T06:02:02,495 DEBUG [master/407411ea95db:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T06:02:02,499 DEBUG [master/407411ea95db:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T06:02:02,501 INFO [master/407411ea95db:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63288563, jitterRate=-0.056926921010017395}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-22T06:02:02,507 DEBUG [master/407411ea95db:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732255322410Initializing all the Stores at 1732255322412 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732255322412Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732255322413 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732255322413Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732255322413Cleaning up temporary data from old regions at 1732255322487 (+74 ms)Region opened successfully at 1732255322507 (+20 ms) 2024-11-22T06:02:02,508 INFO [master/407411ea95db:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-22T06:02:02,543 DEBUG [master/407411ea95db:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c74698c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=407411ea95db/172.17.0.2:0 2024-11-22T06:02:02,577 INFO [master/407411ea95db:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 
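The region above opens with SteppingSplitPolicy. A sketch of selecting that policy cluster-wide through configuration (the key is the standard split-policy setting; per-table overrides via the table descriptor are also possible):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class SplitPolicySketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Cluster-wide default split policy; class name matches the one
            // reported in the region-open line above.
            conf.set("hbase.regionserver.region.split.policy",
                "org.apache.hadoop.hbase.regionserver.SteppingSplitPolicy");
            System.out.println(conf.get("hbase.regionserver.region.split.policy"));
        }
    }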
2024-11-22T06:02:02,590 INFO [master/407411ea95db:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-22T06:02:02,591 INFO [master/407411ea95db:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-22T06:02:02,593 INFO [master/407411ea95db:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-22T06:02:02,595 INFO [master/407411ea95db:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-22T06:02:02,601 INFO [master/407411ea95db:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 5 msec 2024-11-22T06:02:02,601 INFO [master/407411ea95db:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-22T06:02:02,630 INFO [master/407411ea95db:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-22T06:02:02,640 DEBUG [master/407411ea95db:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-22T06:02:02,703 DEBUG [master/407411ea95db:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-22T06:02:02,706 INFO [master/407411ea95db:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-22T06:02:02,708 DEBUG [master/407411ea95db:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-22T06:02:02,720 DEBUG [master/407411ea95db:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-22T06:02:02,723 INFO [master/407411ea95db:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-22T06:02:02,727 DEBUG [master/407411ea95db:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-22T06:02:02,736 DEBUG [master/407411ea95db:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-22T06:02:02,738 DEBUG [master/407411ea95db:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-22T06:02:02,744 DEBUG [master/407411ea95db:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-22T06:02:02,761 DEBUG [master/407411ea95db:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-22T06:02:02,769 DEBUG [master/407411ea95db:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-22T06:02:02,778 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41799-0x101626c19a90002, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T06:02:02,778 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T06:02:02,778 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T06:02:02,778 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35283-0x101626c19a90003, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T06:02:02,778 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35283-0x101626c19a90003, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T06:02:02,778 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44619-0x101626c19a90001, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T06:02:02,779 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44619-0x101626c19a90001, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T06:02:02,779 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41799-0x101626c19a90002, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T06:02:02,782 INFO [master/407411ea95db:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=407411ea95db,40429,1732255320691, sessionid=0x101626c19a90000, setting cluster-up flag (Was=false) 2024-11-22T06:02:02,811 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T06:02:02,811 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44619-0x101626c19a90001, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T06:02:02,811 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41799-0x101626c19a90002, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T06:02:02,811 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35283-0x101626c19a90003, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
2024-11-22T06:02:02,836 DEBUG [master/407411ea95db:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-22T06:02:02,838 DEBUG [master/407411ea95db:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=407411ea95db,40429,1732255320691 2024-11-22T06:02:02,853 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T06:02:02,853 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44619-0x101626c19a90001, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T06:02:02,853 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35283-0x101626c19a90003, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T06:02:02,853 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41799-0x101626c19a90002, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T06:02:02,878 DEBUG [master/407411ea95db:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-22T06:02:02,880 DEBUG [master/407411ea95db:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=407411ea95db,40429,1732255320691 2024-11-22T06:02:02,888 INFO [master/407411ea95db:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-22T06:02:02,925 DEBUG [master/407411ea95db:0:becomeActiveMaster {}] master.HMaster(3441): Registered master coprocessor service: service=AccessControlService 2024-11-22T06:02:02,930 INFO [master/407411ea95db:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-22T06:02:02,931 INFO [master/407411ea95db:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver loaded, priority=536870912. 
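The coprocessor lines above show AccessController loaded as a system coprocessor on the master (and, further below, on each region server). A sketch of the configuration keys through which such system coprocessors are typically wired in (key names are the standard ones; the exact wiring used by this test is implied by the log rather than shown here):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class AccessControllerConfigSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            String ac = "org.apache.hadoop.hbase.security.access.AccessController";
            // System coprocessors are listed per host type; comma-separate multiple classes.
            conf.set("hbase.coprocessor.master.classes", ac);
            conf.set("hbase.coprocessor.region.classes", ac);
            conf.set("hbase.coprocessor.regionserver.classes", ac);
            System.out.println(conf.get("hbase.coprocessor.master.classes"));
        }
    }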
2024-11-22T06:02:02,931 INFO [RS:0;407411ea95db:44619 {}] regionserver.HRegionServer(746): ClusterId : 6eeb2921-6f20-4de7-bcc0-81ebecd4007b 2024-11-22T06:02:02,931 INFO [RS:2;407411ea95db:35283 {}] regionserver.HRegionServer(746): ClusterId : 6eeb2921-6f20-4de7-bcc0-81ebecd4007b 2024-11-22T06:02:02,932 INFO [RS:1;407411ea95db:41799 {}] regionserver.HRegionServer(746): ClusterId : 6eeb2921-6f20-4de7-bcc0-81ebecd4007b 2024-11-22T06:02:02,935 DEBUG [RS:0;407411ea95db:44619 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-22T06:02:02,935 DEBUG [RS:2;407411ea95db:35283 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-22T06:02:02,935 DEBUG [RS:1;407411ea95db:41799 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-22T06:02:02,964 DEBUG [RS:2;407411ea95db:35283 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-22T06:02:02,964 DEBUG [RS:0;407411ea95db:44619 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-22T06:02:02,964 DEBUG [RS:2;407411ea95db:35283 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-22T06:02:02,964 DEBUG [RS:0;407411ea95db:44619 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-22T06:02:02,970 DEBUG [RS:1;407411ea95db:41799 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-22T06:02:02,970 DEBUG [RS:1;407411ea95db:41799 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-22T06:02:02,971 DEBUG [RS:0;407411ea95db:44619 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-22T06:02:02,971 DEBUG [RS:2;407411ea95db:35283 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-22T06:02:02,972 DEBUG [RS:2;407411ea95db:35283 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4ced781f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=407411ea95db/172.17.0.2:0 2024-11-22T06:02:02,972 DEBUG [RS:0;407411ea95db:44619 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@787c684d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=407411ea95db/172.17.0.2:0 2024-11-22T06:02:02,991 DEBUG [RS:1;407411ea95db:41799 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-22T06:02:02,991 DEBUG [RS:1;407411ea95db:41799 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@766c0641, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=407411ea95db/172.17.0.2:0 2024-11-22T06:02:02,993 DEBUG [RS:2;407411ea95db:35283 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;407411ea95db:35283 2024-11-22T06:02:02,993 DEBUG [RS:0;407411ea95db:44619 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;407411ea95db:44619 
2024-11-22T06:02:02,996 DEBUG [master/407411ea95db:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-22T06:02:02,997 INFO [RS:2;407411ea95db:35283 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-22T06:02:02,997 INFO [RS:0;407411ea95db:44619 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-22T06:02:02,997 INFO [RS:0;407411ea95db:44619 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-22T06:02:02,997 INFO [RS:2;407411ea95db:35283 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-22T06:02:02,998 DEBUG [RS:0;407411ea95db:44619 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-11-22T06:02:02,998 DEBUG [RS:2;407411ea95db:35283 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-11-22T06:02:02,998 INFO [RS:2;407411ea95db:35283 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-22T06:02:02,998 DEBUG [RS:2;407411ea95db:35283 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-22T06:02:03,002 INFO [RS:2;407411ea95db:35283 {}] regionserver.HRegionServer(2659): reportForDuty to master=407411ea95db,40429,1732255320691 with port=35283, startcode=1732255321784 2024-11-22T06:02:03,002 INFO [RS:0;407411ea95db:44619 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-22T06:02:03,002 DEBUG [RS:0;407411ea95db:44619 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-22T06:02:03,004 INFO [RS:0;407411ea95db:44619 {}] regionserver.HRegionServer(2659): reportForDuty to master=407411ea95db,40429,1732255320691 with port=44619, startcode=1732255321573 2024-11-22T06:02:03,009 DEBUG [RS:1;407411ea95db:41799 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;407411ea95db:41799 2024-11-22T06:02:03,010 INFO [RS:1;407411ea95db:41799 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-22T06:02:03,010 INFO [master/407411ea95db:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-22T06:02:03,010 INFO [RS:1;407411ea95db:41799 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-22T06:02:03,010 DEBUG [RS:1;407411ea95db:41799 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-11-22T06:02:03,010 INFO [RS:1;407411ea95db:41799 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-22T06:02:03,011 DEBUG [RS:1;407411ea95db:41799 {}] regionserver.HRegionServer(832): About to register with Master. 
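BaseLoadBalancer reports slop=0.2 above: the tolerated deviation from the average region count per server before a server counts as over- or under-loaded. A sketch reading that setting (the key name is the standard one; the default shown is only this sketch's fallback, not necessarily the balancer's):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerSlopSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Fraction by which a server's region count may deviate from the average.
            double slop = conf.getDouble("hbase.regions.slop", 0.2);
            System.out.println("balancer slop = " + slop);
        }
    }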
2024-11-22T06:02:03,012 INFO [RS:1;407411ea95db:41799 {}] regionserver.HRegionServer(2659): reportForDuty to master=407411ea95db,40429,1732255320691 with port=41799, startcode=1732255321718 2024-11-22T06:02:03,015 DEBUG [RS:0;407411ea95db:44619 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-22T06:02:03,016 DEBUG [RS:2;407411ea95db:35283 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-22T06:02:03,015 DEBUG [RS:1;407411ea95db:41799 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-22T06:02:03,019 INFO [master/407411ea95db:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-22T06:02:03,028 DEBUG [master/407411ea95db:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 407411ea95db,40429,1732255320691 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-22T06:02:03,038 DEBUG [master/407411ea95db:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/407411ea95db:0, corePoolSize=5, maxPoolSize=5 2024-11-22T06:02:03,039 DEBUG [master/407411ea95db:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/407411ea95db:0, corePoolSize=5, maxPoolSize=5 2024-11-22T06:02:03,039 DEBUG [master/407411ea95db:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/407411ea95db:0, corePoolSize=5, maxPoolSize=5 2024-11-22T06:02:03,039 DEBUG [master/407411ea95db:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/407411ea95db:0, corePoolSize=5, maxPoolSize=5 2024-11-22T06:02:03,039 DEBUG [master/407411ea95db:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/407411ea95db:0, corePoolSize=10, maxPoolSize=10 2024-11-22T06:02:03,040 DEBUG [master/407411ea95db:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/407411ea95db:0, corePoolSize=1, maxPoolSize=1 2024-11-22T06:02:03,040 DEBUG [master/407411ea95db:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/407411ea95db:0, corePoolSize=2, maxPoolSize=2 2024-11-22T06:02:03,040 DEBUG [master/407411ea95db:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/407411ea95db:0, corePoolSize=1, maxPoolSize=1 2024-11-22T06:02:03,065 INFO [master/407411ea95db:0:becomeActiveMaster {}] 
procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732255353065 2024-11-22T06:02:03,066 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44121, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-22T06:02:03,067 INFO [master/407411ea95db:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-22T06:02:03,068 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51581, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-22T06:02:03,069 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54339, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-22T06:02:03,075 INFO [master/407411ea95db:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-22T06:02:03,076 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40429 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-22T06:02:03,076 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T06:02:03,077 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-22T06:02:03,080 INFO [master/407411ea95db:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-22T06:02:03,080 INFO [master/407411ea95db:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-22T06:02:03,080 INFO [master/407411ea95db:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-22T06:02:03,081 INFO [master/407411ea95db:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-22T06:02:03,084 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T06:02:03,082 INFO 
[master/407411ea95db:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-22T06:02:03,084 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40429 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-22T06:02:03,085 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-22T06:02:03,087 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40429 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-22T06:02:03,088 INFO [master/407411ea95db:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-22T06:02:03,092 INFO [master/407411ea95db:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-22T06:02:03,092 INFO [master/407411ea95db:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-22T06:02:03,097 INFO [master/407411ea95db:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-22T06:02:03,098 INFO [master/407411ea95db:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-22T06:02:03,100 DEBUG [master/407411ea95db:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/407411ea95db:0:becomeActiveMaster-HFileCleaner.large.0-1732255323099,5,FailOnTimeoutGroup] 2024-11-22T06:02:03,101 DEBUG [master/407411ea95db:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/407411ea95db:0:becomeActiveMaster-HFileCleaner.small.0-1732255323100,5,FailOnTimeoutGroup] 2024-11-22T06:02:03,101 INFO [master/407411ea95db:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-22T06:02:03,101 INFO [master/407411ea95db:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-22T06:02:03,103 INFO [master/407411ea95db:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-22T06:02:03,104 INFO [master/407411ea95db:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-22T06:02:03,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741831_1007 (size=1321) 2024-11-22T06:02:03,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741831_1007 (size=1321) 2024-11-22T06:02:03,113 DEBUG [RS:0;407411ea95db:44619 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-22T06:02:03,113 DEBUG [RS:2;407411ea95db:35283 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-22T06:02:03,113 DEBUG [RS:1;407411ea95db:41799 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-22T06:02:03,113 WARN [RS:0;407411ea95db:44619 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-11-22T06:02:03,113 WARN [RS:1;407411ea95db:41799 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-11-22T06:02:03,113 WARN [RS:2;407411ea95db:35283 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-11-22T06:02:03,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741831_1007 (size=1321) 2024-11-22T06:02:03,116 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-22T06:02:03,117 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788 2024-11-22T06:02:03,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741832_1008 (size=32) 
2024-11-22T06:02:03,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741832_1008 (size=32) 2024-11-22T06:02:03,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741832_1008 (size=32) 2024-11-22T06:02:03,135 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T06:02:03,138 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-22T06:02:03,142 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-22T06:02:03,142 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T06:02:03,143 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T06:02:03,143 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-22T06:02:03,146 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-22T06:02:03,146 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T06:02:03,147 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore 
type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T06:02:03,147 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-22T06:02:03,151 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-22T06:02:03,151 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T06:02:03,152 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T06:02:03,152 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-22T06:02:03,156 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-22T06:02:03,156 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T06:02:03,158 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T06:02:03,158 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-22T06:02:03,160 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/hbase/meta/1588230740 2024-11-22T06:02:03,162 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/hbase/meta/1588230740 2024-11-22T06:02:03,166 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-22T06:02:03,166 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-22T06:02:03,167 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-22T06:02:03,170 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-22T06:02:03,175 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T06:02:03,176 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66894966, jitterRate=-0.003187328577041626}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-22T06:02:03,181 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732255323136Initializing all the Stores at 1732255323137 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732255323138 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732255323138Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732255323138Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732255323138Cleaning up temporary data from old regions at 1732255323166 (+28 ms)Region opened successfully at 1732255323180 (+14 ms) 2024-11-22T06:02:03,181 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-22T06:02:03,181 INFO [PEWorker-1 {}] 
regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-22T06:02:03,181 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-22T06:02:03,181 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-22T06:02:03,181 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-22T06:02:03,183 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-22T06:02:03,183 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732255323181Disabling compacts and flushes for region at 1732255323181Disabling writes for close at 1732255323181Writing region close event to WAL at 1732255323183 (+2 ms)Closed at 1732255323183 2024-11-22T06:02:03,187 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T06:02:03,187 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-22T06:02:03,195 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-22T06:02:03,206 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-22T06:02:03,210 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-22T06:02:03,215 INFO [RS:1;407411ea95db:41799 {}] regionserver.HRegionServer(2659): reportForDuty to master=407411ea95db,40429,1732255320691 with port=41799, startcode=1732255321718 2024-11-22T06:02:03,215 INFO [RS:2;407411ea95db:35283 {}] regionserver.HRegionServer(2659): reportForDuty to master=407411ea95db,40429,1732255320691 with port=35283, startcode=1732255321784 2024-11-22T06:02:03,216 INFO [RS:0;407411ea95db:44619 {}] regionserver.HRegionServer(2659): reportForDuty to master=407411ea95db,40429,1732255320691 with port=44619, startcode=1732255321573 2024-11-22T06:02:03,217 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40429 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 407411ea95db,35283,1732255321784 2024-11-22T06:02:03,220 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40429 {}] master.ServerManager(517): Registering regionserver=407411ea95db,35283,1732255321784 2024-11-22T06:02:03,229 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40429 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 407411ea95db,41799,1732255321718 2024-11-22T06:02:03,229 DEBUG [RS:2;407411ea95db:35283 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788 2024-11-22T06:02:03,229 DEBUG [RS:2;407411ea95db:35283 {}] regionserver.HRegionServer(1440): Config from master: 
fs.defaultFS=hdfs://localhost:33847 2024-11-22T06:02:03,229 DEBUG [RS:2;407411ea95db:35283 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-22T06:02:03,229 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40429 {}] master.ServerManager(517): Registering regionserver=407411ea95db,41799,1732255321718 2024-11-22T06:02:03,233 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40429 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 407411ea95db,44619,1732255321573 2024-11-22T06:02:03,233 DEBUG [RS:1;407411ea95db:41799 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788 2024-11-22T06:02:03,233 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40429 {}] master.ServerManager(517): Registering regionserver=407411ea95db,44619,1732255321573 2024-11-22T06:02:03,233 DEBUG [RS:1;407411ea95db:41799 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33847 2024-11-22T06:02:03,233 DEBUG [RS:1;407411ea95db:41799 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-22T06:02:03,236 DEBUG [RS:0;407411ea95db:44619 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788 2024-11-22T06:02:03,236 DEBUG [RS:0;407411ea95db:44619 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33847 2024-11-22T06:02:03,236 DEBUG [RS:0;407411ea95db:44619 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-22T06:02:03,244 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T06:02:03,289 DEBUG [RS:2;407411ea95db:35283 {}] zookeeper.ZKUtil(111): regionserver:35283-0x101626c19a90003, quorum=127.0.0.1:59716, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/407411ea95db,35283,1732255321784 2024-11-22T06:02:03,290 WARN [RS:2;407411ea95db:35283 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-22T06:02:03,290 INFO [RS:2;407411ea95db:35283 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-22T06:02:03,290 DEBUG [RS:0;407411ea95db:44619 {}] zookeeper.ZKUtil(111): regionserver:44619-0x101626c19a90001, quorum=127.0.0.1:59716, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/407411ea95db,44619,1732255321573 2024-11-22T06:02:03,290 DEBUG [RS:2;407411ea95db:35283 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/WALs/407411ea95db,35283,1732255321784 2024-11-22T06:02:03,290 DEBUG [RS:1;407411ea95db:41799 {}] zookeeper.ZKUtil(111): regionserver:41799-0x101626c19a90002, quorum=127.0.0.1:59716, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/407411ea95db,41799,1732255321718 2024-11-22T06:02:03,290 WARN [RS:0;407411ea95db:44619 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-22T06:02:03,290 WARN [RS:1;407411ea95db:41799 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-22T06:02:03,291 INFO [RS:1;407411ea95db:41799 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-22T06:02:03,291 INFO [RS:0;407411ea95db:44619 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-22T06:02:03,291 DEBUG [RS:0;407411ea95db:44619 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/WALs/407411ea95db,44619,1732255321573 2024-11-22T06:02:03,291 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [407411ea95db,44619,1732255321573] 2024-11-22T06:02:03,291 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [407411ea95db,35283,1732255321784] 2024-11-22T06:02:03,291 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [407411ea95db,41799,1732255321718] 2024-11-22T06:02:03,292 DEBUG [RS:1;407411ea95db:41799 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/WALs/407411ea95db,41799,1732255321718 2024-11-22T06:02:03,319 INFO [RS:2;407411ea95db:35283 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-22T06:02:03,319 INFO [RS:0;407411ea95db:44619 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-22T06:02:03,319 INFO [RS:1;407411ea95db:41799 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-22T06:02:03,337 INFO [RS:1;407411ea95db:41799 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-22T06:02:03,337 INFO [RS:0;407411ea95db:44619 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-22T06:02:03,338 INFO [RS:2;407411ea95db:35283 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-22T06:02:03,344 INFO [RS:1;407411ea95db:41799 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-22T06:02:03,344 INFO [RS:0;407411ea95db:44619 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-22T06:02:03,344 INFO [RS:2;407411ea95db:35283 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-22T06:02:03,344 INFO [RS:2;407411ea95db:35283 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-22T06:02:03,344 INFO [RS:0;407411ea95db:44619 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T06:02:03,344 INFO [RS:1;407411ea95db:41799 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T06:02:03,349 INFO [RS:0;407411ea95db:44619 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-22T06:02:03,349 INFO [RS:1;407411ea95db:41799 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-22T06:02:03,349 INFO [RS:2;407411ea95db:35283 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-22T06:02:03,356 INFO [RS:0;407411ea95db:44619 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-22T06:02:03,356 INFO [RS:1;407411ea95db:41799 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-22T06:02:03,356 INFO [RS:2;407411ea95db:35283 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-22T06:02:03,358 INFO [RS:1;407411ea95db:41799 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-22T06:02:03,358 INFO [RS:0;407411ea95db:44619 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-22T06:02:03,358 DEBUG [RS:0;407411ea95db:44619 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/407411ea95db:0, corePoolSize=1, maxPoolSize=1 2024-11-22T06:02:03,358 DEBUG [RS:1;407411ea95db:41799 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/407411ea95db:0, corePoolSize=1, maxPoolSize=1 2024-11-22T06:02:03,358 DEBUG [RS:0;407411ea95db:44619 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/407411ea95db:0, corePoolSize=1, maxPoolSize=1 2024-11-22T06:02:03,359 DEBUG [RS:0;407411ea95db:44619 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/407411ea95db:0, corePoolSize=1, maxPoolSize=1 2024-11-22T06:02:03,358 INFO [RS:2;407411ea95db:35283 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-22T06:02:03,358 DEBUG [RS:1;407411ea95db:41799 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/407411ea95db:0, corePoolSize=1, maxPoolSize=1 2024-11-22T06:02:03,359 DEBUG [RS:0;407411ea95db:44619 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/407411ea95db:0, corePoolSize=1, maxPoolSize=1 2024-11-22T06:02:03,359 DEBUG [RS:1;407411ea95db:41799 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/407411ea95db:0, corePoolSize=1, maxPoolSize=1 2024-11-22T06:02:03,359 DEBUG [RS:1;407411ea95db:41799 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/407411ea95db:0, corePoolSize=1, maxPoolSize=1 2024-11-22T06:02:03,359 DEBUG [RS:1;407411ea95db:41799 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/407411ea95db:0, corePoolSize=1, maxPoolSize=1 2024-11-22T06:02:03,359 DEBUG [RS:1;407411ea95db:41799 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/407411ea95db:0, corePoolSize=2, maxPoolSize=2 2024-11-22T06:02:03,359 DEBUG [RS:1;407411ea95db:41799 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/407411ea95db:0, corePoolSize=1, maxPoolSize=1 2024-11-22T06:02:03,360 DEBUG [RS:1;407411ea95db:41799 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/407411ea95db:0, corePoolSize=1, maxPoolSize=1 2024-11-22T06:02:03,359 DEBUG [RS:0;407411ea95db:44619 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/407411ea95db:0, corePoolSize=1, maxPoolSize=1 2024-11-22T06:02:03,360 DEBUG [RS:1;407411ea95db:41799 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/407411ea95db:0, corePoolSize=1, maxPoolSize=1 2024-11-22T06:02:03,360 DEBUG [RS:0;407411ea95db:44619 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/407411ea95db:0, corePoolSize=2, maxPoolSize=2 2024-11-22T06:02:03,360 DEBUG [RS:1;407411ea95db:41799 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/407411ea95db:0, corePoolSize=1, maxPoolSize=1 2024-11-22T06:02:03,360 DEBUG [RS:0;407411ea95db:44619 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/407411ea95db:0, corePoolSize=1, maxPoolSize=1 2024-11-22T06:02:03,360 DEBUG [RS:1;407411ea95db:41799 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/407411ea95db:0, corePoolSize=1, maxPoolSize=1 2024-11-22T06:02:03,360 DEBUG [RS:0;407411ea95db:44619 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/407411ea95db:0, corePoolSize=1, maxPoolSize=1 2024-11-22T06:02:03,360 DEBUG [RS:1;407411ea95db:41799 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/407411ea95db:0, corePoolSize=1, maxPoolSize=1 2024-11-22T06:02:03,360 DEBUG [RS:0;407411ea95db:44619 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/407411ea95db:0, corePoolSize=1, maxPoolSize=1 2024-11-22T06:02:03,360 DEBUG [RS:1;407411ea95db:41799 {}] executor.ExecutorService(95): Starting executor service 
name=RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0, corePoolSize=3, maxPoolSize=3 2024-11-22T06:02:03,360 DEBUG [RS:0;407411ea95db:44619 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/407411ea95db:0, corePoolSize=1, maxPoolSize=1 2024-11-22T06:02:03,360 DEBUG [RS:1;407411ea95db:41799 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/407411ea95db:0, corePoolSize=3, maxPoolSize=3 2024-11-22T06:02:03,360 DEBUG [RS:0;407411ea95db:44619 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/407411ea95db:0, corePoolSize=1, maxPoolSize=1 2024-11-22T06:02:03,361 DEBUG [RS:0;407411ea95db:44619 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/407411ea95db:0, corePoolSize=1, maxPoolSize=1 2024-11-22T06:02:03,361 DEBUG [RS:0;407411ea95db:44619 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0, corePoolSize=3, maxPoolSize=3 2024-11-22T06:02:03,361 DEBUG [RS:2;407411ea95db:35283 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/407411ea95db:0, corePoolSize=1, maxPoolSize=1 2024-11-22T06:02:03,361 DEBUG [RS:0;407411ea95db:44619 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/407411ea95db:0, corePoolSize=3, maxPoolSize=3 2024-11-22T06:02:03,361 DEBUG [RS:2;407411ea95db:35283 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/407411ea95db:0, corePoolSize=1, maxPoolSize=1 2024-11-22T06:02:03,361 DEBUG [RS:2;407411ea95db:35283 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/407411ea95db:0, corePoolSize=1, maxPoolSize=1 2024-11-22T06:02:03,361 WARN [407411ea95db:40429 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
2024-11-22T06:02:03,361 DEBUG [RS:2;407411ea95db:35283 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/407411ea95db:0, corePoolSize=1, maxPoolSize=1 2024-11-22T06:02:03,361 DEBUG [RS:2;407411ea95db:35283 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/407411ea95db:0, corePoolSize=1, maxPoolSize=1 2024-11-22T06:02:03,361 DEBUG [RS:2;407411ea95db:35283 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/407411ea95db:0, corePoolSize=2, maxPoolSize=2 2024-11-22T06:02:03,361 DEBUG [RS:2;407411ea95db:35283 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/407411ea95db:0, corePoolSize=1, maxPoolSize=1 2024-11-22T06:02:03,361 DEBUG [RS:2;407411ea95db:35283 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/407411ea95db:0, corePoolSize=1, maxPoolSize=1 2024-11-22T06:02:03,362 DEBUG [RS:2;407411ea95db:35283 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/407411ea95db:0, corePoolSize=1, maxPoolSize=1 2024-11-22T06:02:03,362 DEBUG [RS:2;407411ea95db:35283 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/407411ea95db:0, corePoolSize=1, maxPoolSize=1 2024-11-22T06:02:03,362 DEBUG [RS:2;407411ea95db:35283 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/407411ea95db:0, corePoolSize=1, maxPoolSize=1 2024-11-22T06:02:03,362 DEBUG [RS:2;407411ea95db:35283 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/407411ea95db:0, corePoolSize=1, maxPoolSize=1 2024-11-22T06:02:03,362 DEBUG [RS:2;407411ea95db:35283 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0, corePoolSize=3, maxPoolSize=3 2024-11-22T06:02:03,362 DEBUG [RS:2;407411ea95db:35283 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/407411ea95db:0, corePoolSize=3, maxPoolSize=3 2024-11-22T06:02:03,366 INFO [RS:2;407411ea95db:35283 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-22T06:02:03,366 INFO [RS:2;407411ea95db:35283 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-22T06:02:03,366 INFO [RS:2;407411ea95db:35283 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T06:02:03,366 INFO [RS:2;407411ea95db:35283 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-22T06:02:03,366 INFO [RS:2;407411ea95db:35283 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-22T06:02:03,367 INFO [RS:2;407411ea95db:35283 {}] hbase.ChoreService(168): Chore ScheduledChore name=407411ea95db,35283,1732255321784-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T06:02:03,367 INFO [RS:0;407411ea95db:44619 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-22T06:02:03,367 INFO [RS:0;407411ea95db:44619 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-22T06:02:03,367 INFO [RS:1;407411ea95db:41799 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-22T06:02:03,367 INFO [RS:0;407411ea95db:44619 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T06:02:03,367 INFO [RS:1;407411ea95db:41799 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-22T06:02:03,368 INFO [RS:1;407411ea95db:41799 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T06:02:03,368 INFO [RS:1;407411ea95db:41799 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-22T06:02:03,368 INFO [RS:1;407411ea95db:41799 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-22T06:02:03,368 INFO [RS:1;407411ea95db:41799 {}] hbase.ChoreService(168): Chore ScheduledChore name=407411ea95db,41799,1732255321718-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T06:02:03,369 INFO [RS:0;407411ea95db:44619 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-22T06:02:03,369 INFO [RS:0;407411ea95db:44619 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-22T06:02:03,369 INFO [RS:0;407411ea95db:44619 {}] hbase.ChoreService(168): Chore ScheduledChore name=407411ea95db,44619,1732255321573-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T06:02:03,395 INFO [RS:0;407411ea95db:44619 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-22T06:02:03,395 INFO [RS:2;407411ea95db:35283 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-22T06:02:03,395 INFO [RS:1;407411ea95db:41799 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-22T06:02:03,398 INFO [RS:2;407411ea95db:35283 {}] hbase.ChoreService(168): Chore ScheduledChore name=407411ea95db,35283,1732255321784-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T06:02:03,398 INFO [RS:0;407411ea95db:44619 {}] hbase.ChoreService(168): Chore ScheduledChore name=407411ea95db,44619,1732255321573-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T06:02:03,398 INFO [RS:2;407411ea95db:35283 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T06:02:03,398 INFO [RS:0;407411ea95db:44619 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T06:02:03,399 INFO [RS:1;407411ea95db:41799 {}] hbase.ChoreService(168): Chore ScheduledChore name=407411ea95db,41799,1732255321718-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 
2024-11-22T06:02:03,399 INFO [RS:0;407411ea95db:44619 {}] regionserver.Replication(171): 407411ea95db,44619,1732255321573 started 2024-11-22T06:02:03,399 INFO [RS:2;407411ea95db:35283 {}] regionserver.Replication(171): 407411ea95db,35283,1732255321784 started 2024-11-22T06:02:03,399 INFO [RS:1;407411ea95db:41799 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T06:02:03,399 INFO [RS:1;407411ea95db:41799 {}] regionserver.Replication(171): 407411ea95db,41799,1732255321718 started 2024-11-22T06:02:03,428 INFO [RS:0;407411ea95db:44619 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T06:02:03,428 INFO [RS:2;407411ea95db:35283 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T06:02:03,428 INFO [RS:1;407411ea95db:41799 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T06:02:03,429 INFO [RS:1;407411ea95db:41799 {}] regionserver.HRegionServer(1482): Serving as 407411ea95db,41799,1732255321718, RpcServer on 407411ea95db/172.17.0.2:41799, sessionid=0x101626c19a90002 2024-11-22T06:02:03,429 INFO [RS:2;407411ea95db:35283 {}] regionserver.HRegionServer(1482): Serving as 407411ea95db,35283,1732255321784, RpcServer on 407411ea95db/172.17.0.2:35283, sessionid=0x101626c19a90003 2024-11-22T06:02:03,429 INFO [RS:0;407411ea95db:44619 {}] regionserver.HRegionServer(1482): Serving as 407411ea95db,44619,1732255321573, RpcServer on 407411ea95db/172.17.0.2:44619, sessionid=0x101626c19a90001 2024-11-22T06:02:03,430 DEBUG [RS:1;407411ea95db:41799 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-22T06:02:03,430 DEBUG [RS:2;407411ea95db:35283 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-22T06:02:03,430 DEBUG [RS:0;407411ea95db:44619 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-22T06:02:03,430 DEBUG [RS:2;407411ea95db:35283 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 407411ea95db,35283,1732255321784 2024-11-22T06:02:03,430 DEBUG [RS:1;407411ea95db:41799 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 407411ea95db,41799,1732255321718 2024-11-22T06:02:03,430 DEBUG [RS:0;407411ea95db:44619 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 407411ea95db,44619,1732255321573 2024-11-22T06:02:03,430 DEBUG [RS:2;407411ea95db:35283 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '407411ea95db,35283,1732255321784' 2024-11-22T06:02:03,430 DEBUG [RS:1;407411ea95db:41799 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '407411ea95db,41799,1732255321718' 2024-11-22T06:02:03,430 DEBUG [RS:2;407411ea95db:35283 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-22T06:02:03,430 DEBUG [RS:1;407411ea95db:41799 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-22T06:02:03,431 DEBUG [RS:0;407411ea95db:44619 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '407411ea95db,44619,1732255321573' 
2024-11-22T06:02:03,431 DEBUG [RS:0;407411ea95db:44619 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-22T06:02:03,432 DEBUG [RS:1;407411ea95db:41799 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-22T06:02:03,432 DEBUG [RS:2;407411ea95db:35283 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-22T06:02:03,432 DEBUG [RS:0;407411ea95db:44619 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-22T06:02:03,432 DEBUG [RS:2;407411ea95db:35283 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-22T06:02:03,432 DEBUG [RS:1;407411ea95db:41799 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-22T06:02:03,432 DEBUG [RS:2;407411ea95db:35283 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-22T06:02:03,432 DEBUG [RS:1;407411ea95db:41799 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-22T06:02:03,433 DEBUG [RS:1;407411ea95db:41799 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 407411ea95db,41799,1732255321718 2024-11-22T06:02:03,433 DEBUG [RS:2;407411ea95db:35283 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 407411ea95db,35283,1732255321784 2024-11-22T06:02:03,433 DEBUG [RS:1;407411ea95db:41799 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '407411ea95db,41799,1732255321718' 2024-11-22T06:02:03,433 DEBUG [RS:2;407411ea95db:35283 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '407411ea95db,35283,1732255321784' 2024-11-22T06:02:03,433 DEBUG [RS:1;407411ea95db:41799 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-22T06:02:03,433 DEBUG [RS:2;407411ea95db:35283 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-22T06:02:03,433 DEBUG [RS:0;407411ea95db:44619 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-22T06:02:03,433 DEBUG [RS:0;407411ea95db:44619 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-22T06:02:03,433 DEBUG [RS:0;407411ea95db:44619 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 407411ea95db,44619,1732255321573 2024-11-22T06:02:03,433 DEBUG [RS:0;407411ea95db:44619 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '407411ea95db,44619,1732255321573' 2024-11-22T06:02:03,433 DEBUG [RS:0;407411ea95db:44619 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-22T06:02:03,434 DEBUG [RS:2;407411ea95db:35283 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-22T06:02:03,434 DEBUG [RS:0;407411ea95db:44619 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-22T06:02:03,435 DEBUG [RS:2;407411ea95db:35283 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-22T06:02:03,435 DEBUG [RS:0;407411ea95db:44619 {}] 
procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-22T06:02:03,435 INFO [RS:2;407411ea95db:35283 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-22T06:02:03,435 INFO [RS:0;407411ea95db:44619 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-22T06:02:03,435 INFO [RS:0;407411ea95db:44619 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-22T06:02:03,435 INFO [RS:2;407411ea95db:35283 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-22T06:02:03,437 DEBUG [RS:1;407411ea95db:41799 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-22T06:02:03,438 DEBUG [RS:1;407411ea95db:41799 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-22T06:02:03,438 INFO [RS:1;407411ea95db:41799 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-22T06:02:03,438 INFO [RS:1;407411ea95db:41799 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-22T06:02:03,541 INFO [RS:2;407411ea95db:35283 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-22T06:02:03,541 INFO [RS:1;407411ea95db:41799 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-22T06:02:03,542 INFO [RS:0;407411ea95db:44619 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-22T06:02:03,544 INFO [RS:2;407411ea95db:35283 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=407411ea95db%2C35283%2C1732255321784, suffix=, logDir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/WALs/407411ea95db,35283,1732255321784, archiveDir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/oldWALs, maxLogs=32 2024-11-22T06:02:03,544 INFO [RS:1;407411ea95db:41799 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=407411ea95db%2C41799%2C1732255321718, suffix=, logDir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/WALs/407411ea95db,41799,1732255321718, archiveDir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/oldWALs, maxLogs=32 2024-11-22T06:02:03,545 INFO [RS:0;407411ea95db:44619 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=407411ea95db%2C44619%2C1732255321573, suffix=, logDir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/WALs/407411ea95db,44619,1732255321573, archiveDir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/oldWALs, maxLogs=32 2024-11-22T06:02:03,569 DEBUG [RS:1;407411ea95db:41799 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/WALs/407411ea95db,41799,1732255321718/407411ea95db%2C41799%2C1732255321718.1732255323550, exclude list is [], retry=0 2024-11-22T06:02:03,572 DEBUG [RS:0;407411ea95db:44619 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/WALs/407411ea95db,44619,1732255321573/407411ea95db%2C44619%2C1732255321573.1732255323548, exclude list is [], 
retry=0 2024-11-22T06:02:03,576 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35893,DS-08cd23bf-0e4d-49b5-a7a5-1207b139cd06,DISK] 2024-11-22T06:02:03,578 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43209,DS-0c52a8d8-8d32-460f-a6ef-8426314f1096,DISK] 2024-11-22T06:02:03,578 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43345,DS-0b5581d3-dd67-41a8-861e-1dad663ec757,DISK] 2024-11-22T06:02:03,580 DEBUG [RS:2;407411ea95db:35283 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/WALs/407411ea95db,35283,1732255321784/407411ea95db%2C35283%2C1732255321784.1732255323550, exclude list is [], retry=0 2024-11-22T06:02:03,590 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35893,DS-08cd23bf-0e4d-49b5-a7a5-1207b139cd06,DISK] 2024-11-22T06:02:03,590 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43209,DS-0c52a8d8-8d32-460f-a6ef-8426314f1096,DISK] 2024-11-22T06:02:03,590 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43345,DS-0b5581d3-dd67-41a8-861e-1dad663ec757,DISK] 2024-11-22T06:02:03,632 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43345,DS-0b5581d3-dd67-41a8-861e-1dad663ec757,DISK] 2024-11-22T06:02:03,632 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43209,DS-0c52a8d8-8d32-460f-a6ef-8426314f1096,DISK] 2024-11-22T06:02:03,634 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35893,DS-08cd23bf-0e4d-49b5-a7a5-1207b139cd06,DISK] 2024-11-22T06:02:03,644 INFO [RS:1;407411ea95db:41799 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/WALs/407411ea95db,41799,1732255321718/407411ea95db%2C41799%2C1732255321718.1732255323550 2024-11-22T06:02:03,652 INFO [RS:0;407411ea95db:44619 {}] 
wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/WALs/407411ea95db,44619,1732255321573/407411ea95db%2C44619%2C1732255321573.1732255323548 2024-11-22T06:02:03,655 DEBUG [RS:1;407411ea95db:41799 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40289:40289),(127.0.0.1/127.0.0.1:35589:35589),(127.0.0.1/127.0.0.1:45629:45629)] 2024-11-22T06:02:03,670 INFO [RS:2;407411ea95db:35283 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/WALs/407411ea95db,35283,1732255321784/407411ea95db%2C35283%2C1732255321784.1732255323550 2024-11-22T06:02:03,675 DEBUG [RS:0;407411ea95db:44619 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:35589:35589),(127.0.0.1/127.0.0.1:45629:45629),(127.0.0.1/127.0.0.1:40289:40289)] 2024-11-22T06:02:03,675 DEBUG [RS:2;407411ea95db:35283 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45629:45629),(127.0.0.1/127.0.0.1:35589:35589),(127.0.0.1/127.0.0.1:40289:40289)] 2024-11-22T06:02:03,864 DEBUG [407411ea95db:40429 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-11-22T06:02:03,875 DEBUG [407411ea95db:40429 {}] balancer.BalancerClusterState(204): Hosts are {407411ea95db=0} racks are {/default-rack=0} 2024-11-22T06:02:03,884 DEBUG [407411ea95db:40429 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-22T06:02:03,884 DEBUG [407411ea95db:40429 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-22T06:02:03,884 DEBUG [407411ea95db:40429 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-22T06:02:03,884 DEBUG [407411ea95db:40429 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-22T06:02:03,884 DEBUG [407411ea95db:40429 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-22T06:02:03,884 DEBUG [407411ea95db:40429 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-22T06:02:03,884 INFO [407411ea95db:40429 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-22T06:02:03,884 INFO [407411ea95db:40429 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-22T06:02:03,884 INFO [407411ea95db:40429 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-22T06:02:03,884 DEBUG [407411ea95db:40429 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-22T06:02:03,897 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=407411ea95db,41799,1732255321718 2024-11-22T06:02:03,906 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 407411ea95db,41799,1732255321718, state=OPENING 2024-11-22T06:02:03,944 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-22T06:02:03,953 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44619-0x101626c19a90001, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T06:02:03,953 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41799-0x101626c19a90002, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-11-22T06:02:03,953 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35283-0x101626c19a90003, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T06:02:03,953 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T06:02:03,956 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T06:02:03,956 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T06:02:03,956 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T06:02:03,957 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T06:02:03,959 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-22T06:02:03,962 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=407411ea95db,41799,1732255321718}] 2024-11-22T06:02:04,140 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-22T06:02:04,143 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45535, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-22T06:02:04,156 INFO [RS_OPEN_META-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-22T06:02:04,156 INFO [RS_OPEN_META-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-22T06:02:04,157 INFO [RS_OPEN_META-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-22T06:02:04,161 INFO [RS_OPEN_META-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=407411ea95db%2C41799%2C1732255321718.meta, suffix=.meta, logDir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/WALs/407411ea95db,41799,1732255321718, archiveDir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/oldWALs, maxLogs=32 2024-11-22T06:02:04,179 DEBUG [RS_OPEN_META-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/WALs/407411ea95db,41799,1732255321718/407411ea95db%2C41799%2C1732255321718.meta.1732255324163.meta, exclude list is [], retry=0 
2024-11-22T06:02:04,183 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35893,DS-08cd23bf-0e4d-49b5-a7a5-1207b139cd06,DISK] 2024-11-22T06:02:04,183 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43345,DS-0b5581d3-dd67-41a8-861e-1dad663ec757,DISK] 2024-11-22T06:02:04,183 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43209,DS-0c52a8d8-8d32-460f-a6ef-8426314f1096,DISK] 2024-11-22T06:02:04,198 INFO [RS_OPEN_META-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/WALs/407411ea95db,41799,1732255321718/407411ea95db%2C41799%2C1732255321718.meta.1732255324163.meta 2024-11-22T06:02:04,199 DEBUG [RS_OPEN_META-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40289:40289),(127.0.0.1/127.0.0.1:45629:45629),(127.0.0.1/127.0.0.1:35589:35589)] 2024-11-22T06:02:04,199 DEBUG [RS_OPEN_META-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-22T06:02:04,201 DEBUG [RS_OPEN_META-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=AccessControlService 2024-11-22T06:02:04,202 INFO [RS_OPEN_META-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-22T06:02:04,203 DEBUG [RS_OPEN_META-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-22T06:02:04,205 DEBUG [RS_OPEN_META-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-22T06:02:04,207 INFO [RS_OPEN_META-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-22T06:02:04,216 DEBUG [RS_OPEN_META-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-22T06:02:04,217 DEBUG [RS_OPEN_META-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T06:02:04,217 DEBUG [RS_OPEN_META-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-22T06:02:04,217 DEBUG [RS_OPEN_META-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-22T06:02:04,221 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-22T06:02:04,224 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-22T06:02:04,225 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T06:02:04,226 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T06:02:04,226 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-22T06:02:04,228 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-22T06:02:04,228 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T06:02:04,229 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T06:02:04,230 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-22T06:02:04,232 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-22T06:02:04,232 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T06:02:04,233 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T06:02:04,233 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-22T06:02:04,236 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-22T06:02:04,236 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T06:02:04,237 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-22T06:02:04,237 DEBUG [RS_OPEN_META-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-22T06:02:04,239 DEBUG [RS_OPEN_META-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/hbase/meta/1588230740 2024-11-22T06:02:04,243 DEBUG [RS_OPEN_META-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/hbase/meta/1588230740 2024-11-22T06:02:04,246 DEBUG [RS_OPEN_META-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-22T06:02:04,246 DEBUG [RS_OPEN_META-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-22T06:02:04,247 DEBUG [RS_OPEN_META-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-22T06:02:04,250 DEBUG [RS_OPEN_META-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-22T06:02:04,252 INFO [RS_OPEN_META-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66518136, jitterRate=-0.008802533149719238}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-22T06:02:04,253 DEBUG [RS_OPEN_META-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-22T06:02:04,257 DEBUG [RS_OPEN_META-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732255324218Writing region info on filesystem at 1732255324218Initializing all the Stores at 1732255324221 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732255324221Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732255324221Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732255324221Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732255324221Cleaning up temporary data from old regions at 1732255324246 (+25 ms)Running coprocessor post-open hooks at 1732255324253 (+7 ms)Region opened successfully at 1732255324257 (+4 ms) 2024-11-22T06:02:04,266 INFO [RS_OPEN_META-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732255324131 2024-11-22T06:02:04,279 DEBUG [RS_OPEN_META-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-22T06:02:04,279 INFO [RS_OPEN_META-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-22T06:02:04,282 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=407411ea95db,41799,1732255321718 2024-11-22T06:02:04,284 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 407411ea95db,41799,1732255321718, state=OPEN 2024-11-22T06:02:04,336 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35283-0x101626c19a90003, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T06:02:04,336 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44619-0x101626c19a90001, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T06:02:04,336 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T06:02:04,336 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41799-0x101626c19a90002, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T06:02:04,336 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T06:02:04,336 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T06:02:04,336 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T06:02:04,337 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T06:02:04,337 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, 
hasLock=true; OpenRegionProcedure 1588230740, server=407411ea95db,41799,1732255321718 2024-11-22T06:02:04,343 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-22T06:02:04,343 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=407411ea95db,41799,1732255321718 in 375 msec 2024-11-22T06:02:04,351 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-22T06:02:04,351 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 1.1500 sec 2024-11-22T06:02:04,353 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T06:02:04,353 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-22T06:02:04,373 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T06:02:04,375 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=407411ea95db,41799,1732255321718, seqNum=-1] 2024-11-22T06:02:04,395 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T06:02:04,398 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36501, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T06:02:04,433 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.4860 sec 2024-11-22T06:02:04,434 INFO [master/407411ea95db:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732255324434, completionTime=-1 2024-11-22T06:02:04,437 INFO [master/407411ea95db:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-11-22T06:02:04,438 DEBUG [master/407411ea95db:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
2024-11-22T06:02:04,475 INFO [master/407411ea95db:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-11-22T06:02:04,476 INFO [master/407411ea95db:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732255384476 2024-11-22T06:02:04,476 INFO [master/407411ea95db:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732255444476 2024-11-22T06:02:04,476 INFO [master/407411ea95db:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 38 msec 2024-11-22T06:02:04,478 DEBUG [master/407411ea95db:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-11-22T06:02:04,490 INFO [master/407411ea95db:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=407411ea95db,40429,1732255320691-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T06:02:04,491 INFO [master/407411ea95db:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=407411ea95db,40429,1732255320691-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T06:02:04,491 INFO [master/407411ea95db:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=407411ea95db,40429,1732255320691-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T06:02:04,493 INFO [master/407411ea95db:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-407411ea95db:40429, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T06:02:04,494 INFO [master/407411ea95db:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-22T06:02:04,500 INFO [master/407411ea95db:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-22T06:02:04,505 DEBUG [master/407411ea95db:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-22T06:02:04,551 INFO [master/407411ea95db:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.645sec 2024-11-22T06:02:04,560 INFO [master/407411ea95db:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-22T06:02:04,563 INFO [master/407411ea95db:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-22T06:02:04,564 INFO [master/407411ea95db:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-22T06:02:04,565 INFO [master/407411ea95db:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-22T06:02:04,566 INFO [master/407411ea95db:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-22T06:02:04,567 INFO [master/407411ea95db:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=407411ea95db,40429,1732255320691-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T06:02:04,567 INFO [master/407411ea95db:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=407411ea95db,40429,1732255320691-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-22T06:02:04,599 DEBUG [master/407411ea95db:0:becomeActiveMaster {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-22T06:02:04,599 DEBUG [master/407411ea95db:0:becomeActiveMaster {}] client.AsyncConnectionImpl(321): The fetched master address is 407411ea95db,40429,1732255320691 2024-11-22T06:02:04,601 DEBUG [master/407411ea95db:0:becomeActiveMaster {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@2881c946 2024-11-22T06:02:04,603 DEBUG [master/407411ea95db:0:becomeActiveMaster {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-22T06:02:04,605 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41669, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-22T06:02:04,611 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40429 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'hbase:acl', {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-22T06:02:04,631 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40429 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=hbase:acl 2024-11-22T06:02:04,635 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_PRE_OPERATION 2024-11-22T06:02:04,636 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T06:02:04,641 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-22T06:02:04,643 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40429 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "hbase" qualifier: "acl" procId is: 4 2024-11-22T06:02:04,647 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7fc69675, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:02:04,653 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: 
false 2024-11-22T06:02:04,653 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-22T06:02:04,659 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 407411ea95db,40429,-1 for getting cluster id 2024-11-22T06:02:04,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-22T06:02:04,663 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T06:02:04,683 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '6eeb2921-6f20-4de7-bcc0-81ebecd4007b' 2024-11-22T06:02:04,687 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T06:02:04,688 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "6eeb2921-6f20-4de7-bcc0-81ebecd4007b" 2024-11-22T06:02:04,688 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@283475b6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:02:04,688 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [407411ea95db,40429,-1] 2024-11-22T06:02:04,692 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T06:02:04,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741837_1013 (size=349) 2024-11-22T06:02:04,694 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:02:04,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741837_1013 (size=349) 2024-11-22T06:02:04,700 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32812, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T06:02:04,704 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3479bc2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:02:04,704 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T06:02:04,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741837_1013 (size=349) 2024-11-22T06:02:04,713 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=407411ea95db,41799,1732255321718, seqNum=-1] 2024-11-22T06:02:04,713 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T06:02:04,717 INFO 
[MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45772, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T06:02:04,717 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => c2a1af531af5dfa747044be7409c1d27, NAME => 'hbase:acl,,1732255324607.c2a1af531af5dfa747044be7409c1d27.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:acl', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788 2024-11-22T06:02:04,721 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=407411ea95db,40429,1732255320691 2024-11-22T06:02:04,721 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2305): Starting mini mapreduce cluster... 2024-11-22T06:02:04,721 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/test.cache.data in system properties and HBase conf 2024-11-22T06:02:04,721 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/hadoop.tmp.dir in system properties and HBase conf 2024-11-22T06:02:04,721 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/hadoop.log.dir in system properties and HBase conf 2024-11-22T06:02:04,721 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-22T06:02:04,722 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-22T06:02:04,722 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-22T06:02:04,722 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-22T06:02:04,722 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-22T06:02:04,722 INFO 
[Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-22T06:02:04,722 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-22T06:02:04,722 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-22T06:02:04,723 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-22T06:02:04,723 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-22T06:02:04,723 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-22T06:02:04,723 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-22T06:02:04,723 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/nfs.dump.dir in system properties and HBase conf 2024-11-22T06:02:04,723 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/java.io.tmpdir in system properties and HBase conf 2024-11-22T06:02:04,723 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-22T06:02:04,723 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/dfs.provided.aliasmap.inmemory.leveldb.dir in system 
properties and HBase conf 2024-11-22T06:02:04,723 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-22T06:02:04,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-22T06:02:04,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741838_1014 (size=36) 2024-11-22T06:02:04,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741838_1014 (size=36) 2024-11-22T06:02:04,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741838_1014 (size=36) 2024-11-22T06:02:04,799 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(898): Instantiated hbase:acl,,1732255324607.c2a1af531af5dfa747044be7409c1d27.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T06:02:04,799 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1722): Closing c2a1af531af5dfa747044be7409c1d27, disabling compactions & flushes 2024-11-22T06:02:04,799 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1755): Closing region hbase:acl,,1732255324607.c2a1af531af5dfa747044be7409c1d27. 2024-11-22T06:02:04,799 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:acl,,1732255324607.c2a1af531af5dfa747044be7409c1d27. 2024-11-22T06:02:04,799 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on hbase:acl,,1732255324607.c2a1af531af5dfa747044be7409c1d27. after waiting 0 ms 2024-11-22T06:02:04,799 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region hbase:acl,,1732255324607.c2a1af531af5dfa747044be7409c1d27. 2024-11-22T06:02:04,800 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1973): Closed hbase:acl,,1732255324607.c2a1af531af5dfa747044be7409c1d27. 2024-11-22T06:02:04,800 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1676): Region close journal for c2a1af531af5dfa747044be7409c1d27: Waiting for close lock at 1732255324799Disabling compacts and flushes for region at 1732255324799Disabling writes for close at 1732255324799Writing region close event to WAL at 1732255324800 (+1 ms)Closed at 1732255324800 2024-11-22T06:02:04,805 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_ADD_TO_META 2024-11-22T06:02:04,813 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"hbase:acl,,1732255324607.c2a1af531af5dfa747044be7409c1d27.","families":{"info":[{"qualifier":"regioninfo","vlen":35,"tag":[],"timestamp":"1732255324806"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732255324806"}]},"ts":"1732255324806"} 2024-11-22T06:02:04,827 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-22T06:02:04,830 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-22T06:02:04,834 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:acl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732255324830"}]},"ts":"1732255324830"} 2024-11-22T06:02:04,845 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:acl, state=ENABLING in hbase:meta 2024-11-22T06:02:04,846 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {407411ea95db=0} racks are {/default-rack=0} 2024-11-22T06:02:04,849 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-22T06:02:04,849 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-22T06:02:04,849 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-22T06:02:04,849 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-22T06:02:04,849 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-22T06:02:04,849 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-22T06:02:04,849 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-22T06:02:04,849 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-22T06:02:04,849 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-22T06:02:04,849 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-22T06:02:04,852 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=c2a1af531af5dfa747044be7409c1d27, ASSIGN}] 2024-11-22T06:02:04,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741839_1015 (size=592039) 2024-11-22T06:02:04,855 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=c2a1af531af5dfa747044be7409c1d27, ASSIGN 2024-11-22T06:02:04,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741839_1015 (size=592039) 2024-11-22T06:02:04,858 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:acl, region=c2a1af531af5dfa747044be7409c1d27, ASSIGN; state=OFFLINE, location=407411ea95db,41799,1732255321718; forceNewPlan=false, retain=false 2024-11-22T06:02:04,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741839_1015 (size=592039) 2024-11-22T06:02:04,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741840_1016 (size=1663647) 2024-11-22T06:02:04,910 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741840_1016 (size=1663647) 2024-11-22T06:02:04,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741840_1016 (size=1663647) 2024-11-22T06:02:05,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-22T06:02:05,011 INFO [407411ea95db:40429 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-22T06:02:05,012 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=c2a1af531af5dfa747044be7409c1d27, regionState=OPENING, regionLocation=407411ea95db,41799,1732255321718 2024-11-22T06:02:05,019 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:acl, region=c2a1af531af5dfa747044be7409c1d27, ASSIGN because future has completed 2024-11-22T06:02:05,027 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure c2a1af531af5dfa747044be7409c1d27, server=407411ea95db,41799,1732255321718}] 2024-11-22T06:02:05,267 INFO [RS_OPEN_PRIORITY_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(132): Open hbase:acl,,1732255324607.c2a1af531af5dfa747044be7409c1d27. 2024-11-22T06:02:05,268 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => c2a1af531af5dfa747044be7409c1d27, NAME => 'hbase:acl,,1732255324607.c2a1af531af5dfa747044be7409c1d27.', STARTKEY => '', ENDKEY => ''} 2024-11-22T06:02:05,269 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:acl,,1732255324607.c2a1af531af5dfa747044be7409c1d27. service=AccessControlService 2024-11-22T06:02:05,269 INFO [RS_OPEN_PRIORITY_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-22T06:02:05,269 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table acl c2a1af531af5dfa747044be7409c1d27 2024-11-22T06:02:05,270 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(898): Instantiated hbase:acl,,1732255324607.c2a1af531af5dfa747044be7409c1d27.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T06:02:05,270 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for c2a1af531af5dfa747044be7409c1d27 2024-11-22T06:02:05,270 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for c2a1af531af5dfa747044be7409c1d27 2024-11-22T06:02:05,291 INFO [StoreOpener-c2a1af531af5dfa747044be7409c1d27-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family l of region c2a1af531af5dfa747044be7409c1d27 2024-11-22T06:02:05,297 INFO [StoreOpener-c2a1af531af5dfa747044be7409c1d27-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c2a1af531af5dfa747044be7409c1d27 columnFamilyName l 2024-11-22T06:02:05,297 DEBUG [StoreOpener-c2a1af531af5dfa747044be7409c1d27-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T06:02:05,299 INFO [StoreOpener-c2a1af531af5dfa747044be7409c1d27-1 {}] regionserver.HStore(327): Store=c2a1af531af5dfa747044be7409c1d27/l, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T06:02:05,299 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for c2a1af531af5dfa747044be7409c1d27 2024-11-22T06:02:05,302 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/hbase/acl/c2a1af531af5dfa747044be7409c1d27 2024-11-22T06:02:05,303 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/hbase/acl/c2a1af531af5dfa747044be7409c1d27 2024-11-22T06:02:05,305 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for c2a1af531af5dfa747044be7409c1d27 2024-11-22T06:02:05,305 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for c2a1af531af5dfa747044be7409c1d27 2024-11-22T06:02:05,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-22T06:02:05,317 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for c2a1af531af5dfa747044be7409c1d27 2024-11-22T06:02:05,324 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/hbase/acl/c2a1af531af5dfa747044be7409c1d27/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T06:02:05,327 INFO [RS_OPEN_PRIORITY_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1114): Opened c2a1af531af5dfa747044be7409c1d27; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68017444, jitterRate=0.013538897037506104}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-22T06:02:05,327 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for c2a1af531af5dfa747044be7409c1d27 2024-11-22T06:02:05,330 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for c2a1af531af5dfa747044be7409c1d27: Running coprocessor pre-open hook at 1732255325271Writing region info on filesystem at 1732255325271Initializing all the Stores at 1732255325275 (+4 ms)Instantiating store for column family {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732255325275Cleaning up temporary data from old regions at 1732255325305 (+30 ms)Running coprocessor post-open hooks at 1732255325327 (+22 ms)Region opened successfully at 1732255325330 (+3 ms) 2024-11-22T06:02:05,333 INFO [RS_OPEN_PRIORITY_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:acl,,1732255324607.c2a1af531af5dfa747044be7409c1d27., pid=6, masterSystemTime=1732255325220 2024-11-22T06:02:05,337 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:acl,,1732255324607.c2a1af531af5dfa747044be7409c1d27. 
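The CompactionConfiguration dump printed while opening the store for column family 'l' (minCompactSize 128 MB, minFilesToCompact 3, maxFilesToCompact 10, ratio 1.2, off-peak ratio 5.0, major period 604800000, major jitter 0.5) corresponds to the usual store-compaction settings. A hedged sketch of the keys typically behind those values; the values below merely restate what the log reports, and the key-to-field mapping should be verified against the HBase version in use:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public final class CompactionTuning {
      public static Configuration defaults() {
        Configuration conf = HBaseConfiguration.create();
        // Values mirror the CompactionConfiguration line logged above.
        conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize
        conf.setInt("hbase.hstore.compaction.min", 3);              // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);             // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
        conf.setLong("hbase.hregion.majorcompaction", 604800000L);  // major period, 7 days
        conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);
        return conf;
      }
    }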
2024-11-22T06:02:05,337 INFO [RS_OPEN_PRIORITY_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(153): Opened hbase:acl,,1732255324607.c2a1af531af5dfa747044be7409c1d27. 2024-11-22T06:02:05,339 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=c2a1af531af5dfa747044be7409c1d27, regionState=OPEN, openSeqNum=2, regionLocation=407411ea95db,41799,1732255321718 2024-11-22T06:02:05,344 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure c2a1af531af5dfa747044be7409c1d27, server=407411ea95db,41799,1732255321718 because future has completed 2024-11-22T06:02:05,362 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-22T06:02:05,363 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure c2a1af531af5dfa747044be7409c1d27, server=407411ea95db,41799,1732255321718 in 328 msec 2024-11-22T06:02:05,369 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-22T06:02:05,370 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=c2a1af531af5dfa747044be7409c1d27, ASSIGN in 511 msec 2024-11-22T06:02:05,372 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-22T06:02:05,373 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:acl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732255325372"}]},"ts":"1732255325372"} 2024-11-22T06:02:05,379 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:acl, state=ENABLED in hbase:meta 2024-11-22T06:02:05,383 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_POST_OPERATION 2024-11-22T06:02:05,389 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=hbase:acl in 765 msec 2024-11-22T06:02:05,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-22T06:02:05,821 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: hbase:acl completed 2024-11-22T06:02:05,839 DEBUG [master/407411ea95db:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-22T06:02:05,840 INFO [master/407411ea95db:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-22T06:02:05,840 INFO [master/407411ea95db:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=407411ea95db,40429,1732255320691-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
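The "Operation: CREATE, Table Name: hbase:acl completed" entry is the async admin resolving its future once pid=4 reaches SUCCESS, and the repeated "Checking to see if procedure is done pid=4" entries are that client polling the master. A minimal sketch of the equivalent blocking call with the public client API; the connection setup and table name here are illustrative, not the harness's actual code:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public final class CreateTableBlocking {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection();
             Admin admin = conn.getAdmin()) {
          TableDescriptor desc = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("example_tbl")) // illustrative name
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
              .build();
          // createTable() returns only after the master's CreateTableProcedure
          // (and its ASSIGN subprocedures) have finished, as in the log above.
          admin.createTable(desc);
        }
      }
    }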
2024-11-22T06:02:06,757 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T06:02:06,870 WARN [Thread-383 {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T06:02:07,132 INFO [Thread-383 {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T06:02:07,157 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-11-22T06:02:07,158 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T06:02:07,171 INFO [Thread-383 {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T06:02:07,171 INFO [Thread-383 {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T06:02:07,171 INFO [Thread-383 {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-22T06:02:07,175 INFO [Thread-383 {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2257d98e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/hadoop.log.dir/,AVAILABLE} 2024-11-22T06:02:07,176 INFO [Thread-383 {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@68345ece{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-11-22T06:02:07,189 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T06:02:07,189 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T06:02:07,189 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-22T06:02:07,194 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T06:02:07,212 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7147d60{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/hadoop.log.dir/,AVAILABLE} 2024-11-22T06:02:07,212 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@35a8bd20{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-11-22T06:02:07,346 INFO [Thread-383 {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.resourcemanager.webapp.JAXBContextResolver as a provider class 2024-11-22T06:02:07,347 INFO [Thread-383 {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServices as a root resource class 2024-11-22T06:02:07,347 INFO [Thread-383 {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-11-22T06:02:07,350 INFO [Thread-383 {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-11-22T06:02:07,415 INFO [Thread-383 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.resourcemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-22T06:02:07,827 INFO [Thread-383 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-22T06:02:08,255 INFO [Thread-383 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-22T06:02:08,308 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6d75379e{cluster,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/java.io.tmpdir/jetty-localhost-37047-hadoop-yarn-common-3_4_1_jar-_-any-4302736392216808178/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/cluster} 2024-11-22T06:02:08,310 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@96ef0{HTTP/1.1, (http/1.1)}{localhost:37047} 2024-11-22T06:02:08,310 INFO [Time-limited test {}] server.Server(415): Started @16202ms 2024-11-22T06:02:08,320 INFO [Thread-383 {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@61cf6e04{jobhistory,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/java.io.tmpdir/jetty-localhost-32861-hadoop-yarn-common-3_4_1_jar-_-any-1804536012260241409/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/jobhistory} 2024-11-22T06:02:08,321 INFO [Thread-383 {}] 
server.AbstractConnector(333): Started ServerConnector@63fe27c{HTTP/1.1, (http/1.1)}{localhost:32861} 2024-11-22T06:02:08,321 INFO [Thread-383 {}] server.Server(415): Started @16213ms 2024-11-22T06:02:08,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741841_1017 (size=5) 2024-11-22T06:02:08,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741841_1017 (size=5) 2024-11-22T06:02:08,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741841_1017 (size=5) 2024-11-22T06:02:09,566 WARN [Time-limited test {}] tracker.NMLogAggregationStatusTracker(95): Log Aggregation is disabled.So is the LogAggregationStatusTracker. 2024-11-22T06:02:09,574 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T06:02:09,626 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-11-22T06:02:09,628 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T06:02:09,663 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T06:02:09,663 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T06:02:09,663 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-22T06:02:09,667 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T06:02:09,669 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@54218519{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/hadoop.log.dir/,AVAILABLE} 2024-11-22T06:02:09,669 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5fd06f3c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-11-22T06:02:09,741 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-22T06:02:09,756 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices as a root resource class 2024-11-22T06:02:09,756 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-11-22T06:02:09,756 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver as a provider class 2024-11-22T06:02:09,756 INFO [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-11-22T06:02:09,778 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-22T06:02:09,822 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-22T06:02:09,869 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-22T06:02:09,872 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:acl' 2024-11-22T06:02:09,962 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-22T06:02:09,973 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@639863e9{node,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/java.io.tmpdir/jetty-localhost-38343-hadoop-yarn-common-3_4_1_jar-_-any-4829529876067246734/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-11-22T06:02:09,974 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@57cb55f8{HTTP/1.1, (http/1.1)}{localhost:38343} 2024-11-22T06:02:09,974 INFO [Time-limited test {}] server.Server(415): Started @17867ms 2024-11-22T06:02:10,197 WARN [Time-limited test {}] tracker.NMLogAggregationStatusTracker(95): Log Aggregation 
is disabled.So is the LogAggregationStatusTracker. 2024-11-22T06:02:10,199 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T06:02:10,214 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-11-22T06:02:10,215 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T06:02:10,220 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T06:02:10,220 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T06:02:10,220 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T06:02:10,221 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T06:02:10,223 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@47b809eb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/hadoop.log.dir/,AVAILABLE} 2024-11-22T06:02:10,223 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7c4deda7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-11-22T06:02:10,292 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices as a root resource class 2024-11-22T06:02:10,292 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-11-22T06:02:10,293 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver as a provider class 2024-11-22T06:02:10,293 INFO [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-11-22T06:02:10,319 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-22T06:02:10,328 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-22T06:02:10,449 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices to GuiceManagedComponentProvider with the scope 
"Singleton" 2024-11-22T06:02:10,453 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4a2c53a0{node,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/java.io.tmpdir/jetty-localhost-35293-hadoop-yarn-common-3_4_1_jar-_-any-8960937640628665144/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-11-22T06:02:10,453 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7a18dd6d{HTTP/1.1, (http/1.1)}{localhost:35293} 2024-11-22T06:02:10,453 INFO [Time-limited test {}] server.Server(415): Started @18346ms 2024-11-22T06:02:10,490 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2341): Mini mapreduce cluster started 2024-11-22T06:02:10,491 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [30,000] milli-secs(wait.for.ratio=[1]) 2024-11-22T06:02:10,531 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportWithTargetName Thread=718, OpenFileDescriptor=780, MaxFileDescriptor=1048576, SystemLoadAverage=388, ProcessCount=11, AvailableMemoryMB=7505 2024-11-22T06:02:10,533 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=718 is superior to 500 2024-11-22T06:02:10,539 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-22T06:02:10,545 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 407411ea95db,40429,1732255320691 2024-11-22T06:02:10,545 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@5da87896 2024-11-22T06:02:10,545 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-22T06:02:10,548 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32818, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-22T06:02:10,549 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T06:02:10,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithTargetName 2024-11-22T06:02:10,554 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_PRE_OPERATION 2024-11-22T06:02:10,556 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: 
"default" qualifier: "testtb-testExportWithTargetName" procId is: 7 2024-11-22T06:02:10,556 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T06:02:10,559 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-22T06:02:10,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-22T06:02:10,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741842_1018 (size=406) 2024-11-22T06:02:10,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741842_1018 (size=406) 2024-11-22T06:02:10,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741842_1018 (size=406) 2024-11-22T06:02:10,594 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 39607123315dab659c600f300343fbef, NAME => 'testtb-testExportWithTargetName,,1732255330549.39607123315dab659c600f300343fbef.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788 2024-11-22T06:02:10,606 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => c8c8ffa80b9454c9f2701d6367bc0c99, NAME => 'testtb-testExportWithTargetName,1,1732255330549.c8c8ffa80b9454c9f2701d6367bc0c99.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788 2024-11-22T06:02:10,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741843_1019 (size=67) 2024-11-22T06:02:10,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741843_1019 (size=67) 2024-11-22T06:02:10,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741843_1019 (size=67) 2024-11-22T06:02:10,645 DEBUG 
[RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,,1732255330549.39607123315dab659c600f300343fbef.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T06:02:10,646 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1722): Closing 39607123315dab659c600f300343fbef, disabling compactions & flushes 2024-11-22T06:02:10,646 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,,1732255330549.39607123315dab659c600f300343fbef. 2024-11-22T06:02:10,646 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,,1732255330549.39607123315dab659c600f300343fbef. 2024-11-22T06:02:10,646 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,,1732255330549.39607123315dab659c600f300343fbef. after waiting 0 ms 2024-11-22T06:02:10,646 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,,1732255330549.39607123315dab659c600f300343fbef. 2024-11-22T06:02:10,646 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,,1732255330549.39607123315dab659c600f300343fbef. 2024-11-22T06:02:10,646 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1676): Region close journal for 39607123315dab659c600f300343fbef: Waiting for close lock at 1732255330646Disabling compacts and flushes for region at 1732255330646Disabling writes for close at 1732255330646Writing region close event to WAL at 1732255330646Closed at 1732255330646 2024-11-22T06:02:10,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741844_1020 (size=67) 2024-11-22T06:02:10,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741844_1020 (size=67) 2024-11-22T06:02:10,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741844_1020 (size=67) 2024-11-22T06:02:10,659 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,1,1732255330549.c8c8ffa80b9454c9f2701d6367bc0c99.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T06:02:10,660 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1722): Closing c8c8ffa80b9454c9f2701d6367bc0c99, disabling compactions & flushes 2024-11-22T06:02:10,660 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,1,1732255330549.c8c8ffa80b9454c9f2701d6367bc0c99. 2024-11-22T06:02:10,660 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,1,1732255330549.c8c8ffa80b9454c9f2701d6367bc0c99. 
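The two regions initialized by the RegionOpenAndInit pool above, with key ranges ['', '1') and ['1', ''), are consistent with the table being created pre-split on the single key '1'. A hedged sketch of such a create through the Java Admin API; the descriptor below only approximates the attributes shown in the log:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class CreatePreSplitTable {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection();
             Admin admin = conn.getAdmin()) {
          TableDescriptor desc = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("testtb-testExportWithTargetName"))
              .setRegionReplication(1)
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                  .setMaxVersions(1)
                  .build())
              .build();
          // One split key "1" yields exactly the two regions seen in the log:
          // ['', '1') and ['1', '').
          byte[][] splitKeys = { Bytes.toBytes("1") };
          admin.createTable(desc, splitKeys);
        }
      }
    }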
2024-11-22T06:02:10,660 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,1,1732255330549.c8c8ffa80b9454c9f2701d6367bc0c99. after waiting 0 ms 2024-11-22T06:02:10,660 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,1,1732255330549.c8c8ffa80b9454c9f2701d6367bc0c99. 2024-11-22T06:02:10,660 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,1,1732255330549.c8c8ffa80b9454c9f2701d6367bc0c99. 2024-11-22T06:02:10,660 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1676): Region close journal for c8c8ffa80b9454c9f2701d6367bc0c99: Waiting for close lock at 1732255330660Disabling compacts and flushes for region at 1732255330660Disabling writes for close at 1732255330660Writing region close event to WAL at 1732255330660Closed at 1732255330660 2024-11-22T06:02:10,662 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_ADD_TO_META 2024-11-22T06:02:10,663 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithTargetName,,1732255330549.39607123315dab659c600f300343fbef.","families":{"info":[{"qualifier":"regioninfo","vlen":66,"tag":[],"timestamp":"1732255330662"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732255330662"}]},"ts":"1732255330662"} 2024-11-22T06:02:10,663 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithTargetName,1,1732255330549.c8c8ffa80b9454c9f2701d6367bc0c99.","families":{"info":[{"qualifier":"regioninfo","vlen":66,"tag":[],"timestamp":"1732255330662"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732255330662"}]},"ts":"1732255330662"} 2024-11-22T06:02:10,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-22T06:02:10,703 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
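The Put entries above write info:regioninfo and info:state for each new region into hbase:meta, followed shortly by the table:state row. A small sketch of inspecting those rows from a client using only the public scan API; the prefix and the printed columns follow what the log shows, but treat the snippet as illustrative:

    import org.apache.hadoop.hbase.CellUtil;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class InspectMeta {
      public static void main(String[] args) throws Exception {
        byte[] prefix = Bytes.toBytes("testtb-testExportWithTargetName,");
        try (Connection conn = ConnectionFactory.createConnection();
             Table meta = conn.getTable(TableName.META_TABLE_NAME)) {
          Scan scan = new Scan().setStartStopRowForPrefixScan(prefix);
          try (ResultScanner rs = meta.getScanner(scan)) {
            for (Result r : rs) {
              // Each region row carries info:regioninfo, info:state, info:server, ...
              System.out.println(Bytes.toStringBinary(r.getRow()));
              r.listCells().forEach(c -> System.out.println("  "
                  + Bytes.toString(CellUtil.cloneFamily(c)) + ":"
                  + Bytes.toString(CellUtil.cloneQualifier(c))));
            }
          }
        }
      }
    }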
2024-11-22T06:02:10,710 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-22T06:02:10,710 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732255330710"}]},"ts":"1732255330710"} 2024-11-22T06:02:10,714 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=ENABLING in hbase:meta 2024-11-22T06:02:10,714 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {407411ea95db=0} racks are {/default-rack=0} 2024-11-22T06:02:10,716 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-22T06:02:10,716 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-22T06:02:10,716 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-22T06:02:10,717 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-22T06:02:10,717 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-22T06:02:10,717 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-22T06:02:10,717 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-22T06:02:10,717 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-22T06:02:10,717 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-22T06:02:10,717 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-22T06:02:10,717 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=39607123315dab659c600f300343fbef, ASSIGN}, {pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=c8c8ffa80b9454c9f2701d6367bc0c99, ASSIGN}] 2024-11-22T06:02:10,720 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=c8c8ffa80b9454c9f2701d6367bc0c99, ASSIGN 2024-11-22T06:02:10,721 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=39607123315dab659c600f300343fbef, ASSIGN 2024-11-22T06:02:10,723 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=39607123315dab659c600f300343fbef, ASSIGN; state=OFFLINE, location=407411ea95db,44619,1732255321573; forceNewPlan=false, retain=false 2024-11-22T06:02:10,723 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=9, ppid=7, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=c8c8ffa80b9454c9f2701d6367bc0c99, ASSIGN; state=OFFLINE, location=407411ea95db,35283,1732255321784; forceNewPlan=false, retain=false 2024-11-22T06:02:10,873 INFO [407411ea95db:40429 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-11-22T06:02:10,874 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=39607123315dab659c600f300343fbef, regionState=OPENING, regionLocation=407411ea95db,44619,1732255321573 2024-11-22T06:02:10,874 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=c8c8ffa80b9454c9f2701d6367bc0c99, regionState=OPENING, regionLocation=407411ea95db,35283,1732255321784 2024-11-22T06:02:10,878 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=c8c8ffa80b9454c9f2701d6367bc0c99, ASSIGN because future has completed 2024-11-22T06:02:10,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-22T06:02:10,879 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; OpenRegionProcedure c8c8ffa80b9454c9f2701d6367bc0c99, server=407411ea95db,35283,1732255321784}] 2024-11-22T06:02:10,882 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=39607123315dab659c600f300343fbef, ASSIGN because future has completed 2024-11-22T06:02:10,885 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=11, ppid=8, state=RUNNABLE, hasLock=false; OpenRegionProcedure 39607123315dab659c600f300343fbef, server=407411ea95db,44619,1732255321573}] 2024-11-22T06:02:11,038 DEBUG [RSProcedureDispatcher-pool-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-22T06:02:11,044 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-22T06:02:11,061 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60585, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-22T06:02:11,067 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44563, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-22T06:02:11,084 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] handler.AssignRegionHandler(132): Open testtb-testExportWithTargetName,1,1732255330549.c8c8ffa80b9454c9f2701d6367bc0c99. 
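The "Using SIMPLE authentication for service=AdminService, sasl=false" lines show the RPC layer running without SASL negotiation. A hedged sketch of the two configuration keys that usually drive this; setting them to "kerberos" is what would make the same connections negotiate SASL instead:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public final class RpcAuthMode {
      public static Configuration simpleAuth() {
        Configuration conf = HBaseConfiguration.create();
        // "simple" is the default; "kerberos" would turn the sasl=false
        // connections in the log into SASL-negotiated ones.
        conf.set("hbase.security.authentication", "simple");
        conf.set("hadoop.security.authentication", "simple");
        return conf;
      }
    }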
2024-11-22T06:02:11,085 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7752): Opening region: {ENCODED => c8c8ffa80b9454c9f2701d6367bc0c99, NAME => 'testtb-testExportWithTargetName,1,1732255330549.c8c8ffa80b9454c9f2701d6367bc0c99.', STARTKEY => '1', ENDKEY => ''} 2024-11-22T06:02:11,086 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithTargetName,1,1732255330549.c8c8ffa80b9454c9f2701d6367bc0c99. service=AccessControlService 2024-11-22T06:02:11,086 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-22T06:02:11,086 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithTargetName c8c8ffa80b9454c9f2701d6367bc0c99 2024-11-22T06:02:11,086 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,1,1732255330549.c8c8ffa80b9454c9f2701d6367bc0c99.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T06:02:11,086 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7794): checking encryption for c8c8ffa80b9454c9f2701d6367bc0c99 2024-11-22T06:02:11,087 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7797): checking classloading for c8c8ffa80b9454c9f2701d6367bc0c99 2024-11-22T06:02:11,091 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(132): Open testtb-testExportWithTargetName,,1732255330549.39607123315dab659c600f300343fbef. 2024-11-22T06:02:11,091 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7752): Opening region: {ENCODED => 39607123315dab659c600f300343fbef, NAME => 'testtb-testExportWithTargetName,,1732255330549.39607123315dab659c600f300343fbef.', STARTKEY => '', ENDKEY => '1'} 2024-11-22T06:02:11,092 INFO [StoreOpener-c8c8ffa80b9454c9f2701d6367bc0c99-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region c8c8ffa80b9454c9f2701d6367bc0c99 2024-11-22T06:02:11,092 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithTargetName,,1732255330549.39607123315dab659c600f300343fbef. service=AccessControlService 2024-11-22T06:02:11,092 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
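Each region open above also registers the AccessControlService coprocessor endpoint, which is what client-side ACL helpers talk to. A hedged sketch of reading permissions through AccessControlClient; the helper lives in org.apache.hadoop.hbase.security.access, but verify the exact signature against the HBase version in use:

    import java.util.List;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.UserPermission;

    public final class ListAcls {
      public static void main(String[] args) throws Throwable {
        try (Connection conn = ConnectionFactory.createConnection()) {
          // Regex over table names; matches the test table created in this log.
          List<UserPermission> perms =
              AccessControlClient.getUserPermissions(conn, "testtb-.*");
          perms.forEach(System.out::println);
        }
      }
    }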
2024-11-22T06:02:11,092 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithTargetName 39607123315dab659c600f300343fbef 2024-11-22T06:02:11,093 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,,1732255330549.39607123315dab659c600f300343fbef.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T06:02:11,093 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7794): checking encryption for 39607123315dab659c600f300343fbef 2024-11-22T06:02:11,093 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7797): checking classloading for 39607123315dab659c600f300343fbef 2024-11-22T06:02:11,096 INFO [StoreOpener-c8c8ffa80b9454c9f2701d6367bc0c99-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c8c8ffa80b9454c9f2701d6367bc0c99 columnFamilyName cf 2024-11-22T06:02:11,097 DEBUG [StoreOpener-c8c8ffa80b9454c9f2701d6367bc0c99-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T06:02:11,099 INFO [StoreOpener-c8c8ffa80b9454c9f2701d6367bc0c99-1 {}] regionserver.HStore(327): Store=c8c8ffa80b9454c9f2701d6367bc0c99/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T06:02:11,100 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1038): replaying wal for c8c8ffa80b9454c9f2701d6367bc0c99 2024-11-22T06:02:11,102 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithTargetName/c8c8ffa80b9454c9f2701d6367bc0c99 2024-11-22T06:02:11,103 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithTargetName/c8c8ffa80b9454c9f2701d6367bc0c99 2024-11-22T06:02:11,110 INFO [StoreOpener-39607123315dab659c600f300343fbef-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column 
family cf of region 39607123315dab659c600f300343fbef 2024-11-22T06:02:11,111 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1048): stopping wal replay for c8c8ffa80b9454c9f2701d6367bc0c99 2024-11-22T06:02:11,111 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1060): Cleaning up temporary data for c8c8ffa80b9454c9f2701d6367bc0c99 2024-11-22T06:02:11,113 INFO [StoreOpener-39607123315dab659c600f300343fbef-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 39607123315dab659c600f300343fbef columnFamilyName cf 2024-11-22T06:02:11,113 DEBUG [StoreOpener-39607123315dab659c600f300343fbef-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T06:02:11,114 INFO [StoreOpener-39607123315dab659c600f300343fbef-1 {}] regionserver.HStore(327): Store=39607123315dab659c600f300343fbef/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T06:02:11,114 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1038): replaying wal for 39607123315dab659c600f300343fbef 2024-11-22T06:02:11,114 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1093): writing seq id for c8c8ffa80b9454c9f2701d6367bc0c99 2024-11-22T06:02:11,116 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithTargetName/39607123315dab659c600f300343fbef 2024-11-22T06:02:11,117 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithTargetName/39607123315dab659c600f300343fbef 2024-11-22T06:02:11,118 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1048): stopping wal replay for 39607123315dab659c600f300343fbef 2024-11-22T06:02:11,118 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1060): Cleaning up temporary data for 39607123315dab659c600f300343fbef 2024-11-22T06:02:11,118 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithTargetName/c8c8ffa80b9454c9f2701d6367bc0c99/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T06:02:11,121 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1114): Opened c8c8ffa80b9454c9f2701d6367bc0c99; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59379959, jitterRate=-0.11516965925693512}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-22T06:02:11,121 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1122): Running coprocessor post-open hooks for c8c8ffa80b9454c9f2701d6367bc0c99 2024-11-22T06:02:11,124 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1006): Region open journal for c8c8ffa80b9454c9f2701d6367bc0c99: Running coprocessor pre-open hook at 1732255331087Writing region info on filesystem at 1732255331087Initializing all the Stores at 1732255331089 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732255331089Cleaning up temporary data from old regions at 1732255331111 (+22 ms)Running coprocessor post-open hooks at 1732255331121 (+10 ms)Region opened successfully at 1732255331123 (+2 ms) 2024-11-22T06:02:11,126 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1093): writing seq id for 39607123315dab659c600f300343fbef 2024-11-22T06:02:11,127 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-11-22T06:02:11,127 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase RegionObservers 2024-11-22T06:02:11,128 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-22T06:02:11,128 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-22T06:02:11,130 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-11-22T06:02:11,130 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase RegionServerObservers 2024-11-22T06:02:11,130 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-22T06:02:11,130 INFO 
[HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-22T06:02:11,132 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_acl 2024-11-22T06:02:11,132 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_acl Metrics about Tables on a single HBase RegionServer 2024-11-22T06:02:11,133 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-11-22T06:02:11,134 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase MasterObservers 2024-11-22T06:02:11,134 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-11-22T06:02:11,134 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver Metrics about HBase MasterObservers 2024-11-22T06:02:11,134 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-22T06:02:11,134 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-22T06:02:11,134 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName 2024-11-22T06:02:11,135 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName Metrics about Tables on a single HBase RegionServer 2024-11-22T06:02:11,136 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithTargetName,1,1732255330549.c8c8ffa80b9454c9f2701d6367bc0c99., pid=10, masterSystemTime=1732255331038 2024-11-22T06:02:11,139 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithTargetName/39607123315dab659c600f300343fbef/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T06:02:11,141 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithTargetName,1,1732255330549.c8c8ffa80b9454c9f2701d6367bc0c99. 
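The GlobalMetricRegistriesAdapter lines above publish each metric registry as a Hadoop metrics2 source, which surfaces as a JMX MBean. A sketch of listing those beans in-process, assuming the usual "Hadoop:service=HBase,name=RegionServer,sub=..." object-name pattern; that pattern is an assumption to verify, not something taken from this log:

    import java.lang.management.ManagementFactory;
    import java.util.Set;
    import javax.management.MBeanServer;
    import javax.management.ObjectName;

    public final class ListHBaseMetricsBeans {
      public static void main(String[] args) throws Exception {
        MBeanServer server = ManagementFactory.getPlatformMBeanServer();
        // Assumed naming pattern for Hadoop metrics2-backed HBase sources.
        ObjectName pattern = new ObjectName("Hadoop:service=HBase,name=RegionServer,sub=*");
        Set<ObjectName> names = server.queryNames(pattern, null);
        names.forEach(n -> System.out.println(n.getCanonicalName()));
      }
    }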
2024-11-22T06:02:11,141 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] handler.AssignRegionHandler(153): Opened testtb-testExportWithTargetName,1,1732255330549.c8c8ffa80b9454c9f2701d6367bc0c99. 2024-11-22T06:02:11,141 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1114): Opened 39607123315dab659c600f300343fbef; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64227803, jitterRate=-0.04293115437030792}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-22T06:02:11,141 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 39607123315dab659c600f300343fbef 2024-11-22T06:02:11,141 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1006): Region open journal for 39607123315dab659c600f300343fbef: Running coprocessor pre-open hook at 1732255331093Writing region info on filesystem at 1732255331093Initializing all the Stores at 1732255331096 (+3 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732255331096Cleaning up temporary data from old regions at 1732255331118 (+22 ms)Running coprocessor post-open hooks at 1732255331141 (+23 ms)Region opened successfully at 1732255331141 2024-11-22T06:02:11,142 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=c8c8ffa80b9454c9f2701d6367bc0c99, regionState=OPEN, openSeqNum=2, regionLocation=407411ea95db,35283,1732255321784 2024-11-22T06:02:11,148 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=9, state=RUNNABLE, hasLock=false; OpenRegionProcedure c8c8ffa80b9454c9f2701d6367bc0c99, server=407411ea95db,35283,1732255321784 because future has completed 2024-11-22T06:02:11,148 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithTargetName,,1732255330549.39607123315dab659c600f300343fbef., pid=11, masterSystemTime=1732255331043 2024-11-22T06:02:11,153 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithTargetName,,1732255330549.39607123315dab659c600f300343fbef. 2024-11-22T06:02:11,153 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(153): Opened testtb-testExportWithTargetName,,1732255330549.39607123315dab659c600f300343fbef. 
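
The region open journals above record the effective split policy chain for each region (SteppingSplitPolicy delegating to IncreasingToUpperBoundRegionSplitPolicy and ConstantSizeRegionSplitPolicy, with a jittered desiredMaxFileSize) plus the FlushLargeStoresPolicy{flushSizeLowerBound=-1} fallback. As a rough, non-authoritative sketch of where those knobs can come from, a table descriptor can pin the split policy class and the maximum store file size explicitly; the class name string and the 64 MB figure below are illustrative assumptions, not values taken from this test run.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class SplitPolicySketch {
    public static void main(String[] args) {
        // Illustrative only: pin the split policy and max file size per table,
        // mirroring the policy chain reported in the region open journals above.
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("testtb-testExportWithTargetName"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
            .setRegionSplitPolicyClassName(
                "org.apache.hadoop.hbase.regionserver.SteppingSplitPolicy")
            .setMaxFileSize(64L * 1024 * 1024) // analogous role to hbase.hregion.max.filesize
            .build();
        System.out.println(td);
    }
}
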
2024-11-22T06:02:11,159 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=39607123315dab659c600f300343fbef, regionState=OPEN, openSeqNum=2, regionLocation=407411ea95db,44619,1732255321573 2024-11-22T06:02:11,164 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=8, state=RUNNABLE, hasLock=false; OpenRegionProcedure 39607123315dab659c600f300343fbef, server=407411ea95db,44619,1732255321573 because future has completed 2024-11-22T06:02:11,166 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-11-22T06:02:11,172 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; OpenRegionProcedure c8c8ffa80b9454c9f2701d6367bc0c99, server=407411ea95db,35283,1732255321784 in 273 msec 2024-11-22T06:02:11,173 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=c8c8ffa80b9454c9f2701d6367bc0c99, ASSIGN in 448 msec 2024-11-22T06:02:11,180 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=8 2024-11-22T06:02:11,180 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=8, state=SUCCESS, hasLock=false; OpenRegionProcedure 39607123315dab659c600f300343fbef, server=407411ea95db,44619,1732255321573 in 288 msec 2024-11-22T06:02:11,185 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-22T06:02:11,186 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=39607123315dab659c600f300343fbef, ASSIGN in 463 msec 2024-11-22T06:02:11,187 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-22T06:02:11,188 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732255331188"}]},"ts":"1732255331188"} 2024-11-22T06:02:11,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-22T06:02:11,192 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=ENABLED in hbase:meta 2024-11-22T06:02:11,194 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_POST_OPERATION 2024-11-22T06:02:11,197 DEBUG [PEWorker-1 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithTargetName jenkins: RWXCA 2024-11-22T06:02:11,209 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithTargetName', locateType=CURRENT is [region=hbase:acl,,1732255324607.c2a1af531af5dfa747044be7409c1d27., hostname=407411ea95db,41799,1732255321718, seqNum=2] 2024-11-22T06:02:11,215 DEBUG 
[RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41799 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T06:02:11,215 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41799 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=407411ea95db,41799,1732255321718, seqNum=-1] 2024-11-22T06:02:11,215 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41799 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T06:02:11,217 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47813, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=ClientService 2024-11-22T06:02:11,219 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithTargetName', locateType=CURRENT is [region=hbase:acl,,1732255324607.c2a1af531af5dfa747044be7409c1d27., hostname=407411ea95db,41799,1732255321718, seqNum=2] 2024-11-22T06:02:11,222 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41799 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-11-22T06:02:11,307 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41799-0x101626c19a90002, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-11-22T06:02:11,307 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35283-0x101626c19a90003, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-11-22T06:02:11,307 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-11-22T06:02:11,307 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35283-0x101626c19a90003, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T06:02:11,307 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44619-0x101626c19a90001, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-11-22T06:02:11,307 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44619-0x101626c19a90001, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T06:02:11,307 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T06:02:11,308 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41799-0x101626c19a90002, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T06:02:11,334 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35283-0x101626c19a90003, quorum=127.0.0.1:59716, baseZNode=/hbase 
Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-22T06:02:11,334 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44619-0x101626c19a90001, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-22T06:02:11,335 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-11-22T06:02:11,335 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-11-22T06:02:11,336 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-11-22T06:02:11,336 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-11-22T06:02:11,339 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithTargetName in 786 msec 2024-11-22T06:02:11,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-22T06:02:11,700 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithTargetName completed 2024-11-22T06:02:11,701 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportWithTargetName get assigned. Timeout = 60000ms 2024-11-22T06:02:11,702 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-22T06:02:11,709 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportWithTargetName assigned to meta. Checking AM states. 2024-11-22T06:02:11,710 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-22T06:02:11,711 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportWithTargetName assigned. 
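
The CreateTableProcedure for testtb-testExportWithTargetName finishes above (pid=7, 786 msec) with two regions, one starting at the empty key and one at '1', and a single column family matching the descriptor printed in the open journals (NAME 'cf', VERSIONS 1, BLOOMFILTER ROW, BLOCKSIZE 65536). A minimal client-side sketch of an equivalent createTable call follows, assuming the standard HBase 2.x+ Admin API; the connection setup is generic boilerplate, not taken from this test.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            // One 'cf' family with the attributes the open journal reports.
            ColumnFamilyDescriptorBuilder cf = ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("cf"))
                .setMaxVersions(1)
                .setBloomFilterType(BloomType.ROW)
                .setBlocksize(64 * 1024);
            // Pre-split at '1' so two regions come up, matching the start keys
            // "" and "1" seen in the log.
            byte[][] splitKeys = { Bytes.toBytes("1") };
            admin.createTable(
                TableDescriptorBuilder
                    .newBuilder(TableName.valueOf("testtb-testExportWithTargetName"))
                    .setColumnFamily(cf.build())
                    .build(),
                splitKeys);
        }
    }
}
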
2024-11-22T06:02:11,714 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-22T06:02:11,729 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-11-22T06:02:11,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732255331729 (current time:1732255331729). 2024-11-22T06:02:11,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-22T06:02:11,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithTargetName VERSION not specified, setting to 2 2024-11-22T06:02:11,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-22T06:02:11,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5eeb463e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:02:11,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.ClusterIdFetcher(90): Going to request 407411ea95db,40429,-1 for getting cluster id 2024-11-22T06:02:11,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T06:02:11,734 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '6eeb2921-6f20-4de7-bcc0-81ebecd4007b' 2024-11-22T06:02:11,734 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T06:02:11,734 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "6eeb2921-6f20-4de7-bcc0-81ebecd4007b" 2024-11-22T06:02:11,735 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6a0196e8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:02:11,735 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [407411ea95db,40429,-1] 2024-11-22T06:02:11,735 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T06:02:11,735 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:02:11,737 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): 
Connection from 172.17.0.2:32844, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T06:02:11,738 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@69732503, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:02:11,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T06:02:11,740 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=407411ea95db,41799,1732255321718, seqNum=-1] 2024-11-22T06:02:11,740 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T06:02:11,741 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45776, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T06:02:11,743 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429. 2024-11-22T06:02:11,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-22T06:02:11,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:02:11,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:02:11,751 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
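
The post-create step above writes the table ACL entry 'jenkins: RWXCA' via PermissionStorage and reads it back, and the /hbase/acl znode watchers propagate it to every region server's permission cache. For orientation only, the roughly equivalent explicit grant with the AccessController coprocessor enabled (its metric registrations appear earlier in this log) would look like the sketch below; it assumes the AccessControlClient helper from the standard security API and is not part of this test's output.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class GrantSketch {
    public static void main(String[] args) throws Throwable {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
            // RWXCA on the whole table (null family/qualifier), as logged for 'jenkins'.
            AccessControlClient.grant(conn,
                TableName.valueOf("testtb-testExportWithTargetName"),
                "jenkins", null, null,
                Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
                Permission.Action.CREATE, Permission.Action.ADMIN);
        }
    }
}
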
2024-11-22T06:02:11,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2d854000, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:02:11,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.ClusterIdFetcher(90): Going to request 407411ea95db,40429,-1 for getting cluster id 2024-11-22T06:02:11,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T06:02:11,752 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '6eeb2921-6f20-4de7-bcc0-81ebecd4007b' 2024-11-22T06:02:11,753 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T06:02:11,753 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "6eeb2921-6f20-4de7-bcc0-81ebecd4007b" 2024-11-22T06:02:11,753 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@38dd1709, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:02:11,753 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [407411ea95db,40429,-1] 2024-11-22T06:02:11,754 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T06:02:11,754 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:02:11,756 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32856, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T06:02:11,757 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@51307f5f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:02:11,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T06:02:11,759 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=407411ea95db,41799,1732255321718, seqNum=-1] 2024-11-22T06:02:11,760 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T06:02:11,761 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45782, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-22T06:02:11,764 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithTargetName', locateType=CURRENT is [region=hbase:acl,,1732255324607.c2a1af531af5dfa747044be7409c1d27., hostname=407411ea95db,41799,1732255321718, seqNum=2] 2024-11-22T06:02:11,766 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429. 2024-11-22T06:02:11,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-22T06:02:11,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:02:11,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:02:11,767 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-11-22T06:02:11,767 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-22T06:02:11,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-22T06:02:11,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] procedure2.ProcedureExecutor(1139): Stored pid=12, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-11-22T06:02:11,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 12 2024-11-22T06:02:11,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-11-22T06:02:11,784 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-22T06:02:11,789 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-22T06:02:11,803 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-22T06:02:11,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741845_1021 (size=167) 2024-11-22T06:02:11,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741845_1021 (size=167) 2024-11-22T06:02:11,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741845_1021 (size=167) 2024-11-22T06:02:11,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-11-22T06:02:12,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-11-22T06:02:12,226 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute 
state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-22T06:02:12,229 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 39607123315dab659c600f300343fbef}, {pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c8c8ffa80b9454c9f2701d6367bc0c99}] 2024-11-22T06:02:12,233 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=13, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 39607123315dab659c600f300343fbef 2024-11-22T06:02:12,233 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c8c8ffa80b9454c9f2701d6367bc0c99 2024-11-22T06:02:12,395 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44619 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=13 2024-11-22T06:02:12,396 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35283 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=14 2024-11-22T06:02:12,398 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,1,1732255330549.c8c8ffa80b9454c9f2701d6367bc0c99. 2024-11-22T06:02:12,399 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,,1732255330549.39607123315dab659c600f300343fbef. 2024-11-22T06:02:12,406 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.HRegion(2603): Flush status journal for 39607123315dab659c600f300343fbef: 2024-11-22T06:02:12,406 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,,1732255330549.39607123315dab659c600f300343fbef. for emptySnaptb0-testExportWithTargetName completed. 2024-11-22T06:02:12,406 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for c8c8ffa80b9454c9f2701d6367bc0c99: 2024-11-22T06:02:12,408 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,1,1732255330549.c8c8ffa80b9454c9f2701d6367bc0c99. for emptySnaptb0-testExportWithTargetName completed. 2024-11-22T06:02:12,408 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,,1732255330549.39607123315dab659c600f300343fbef.' region-info for snapshot=emptySnaptb0-testExportWithTargetName 2024-11-22T06:02:12,408 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,1,1732255330549.c8c8ffa80b9454c9f2701d6367bc0c99.' 
region-info for snapshot=emptySnaptb0-testExportWithTargetName 2024-11-22T06:02:12,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-11-22T06:02:12,414 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-22T06:02:12,416 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-22T06:02:12,419 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-22T06:02:12,420 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-22T06:02:12,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741846_1022 (size=70) 2024-11-22T06:02:12,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741846_1022 (size=70) 2024-11-22T06:02:12,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741846_1022 (size=70) 2024-11-22T06:02:12,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741847_1023 (size=70) 2024-11-22T06:02:12,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741847_1023 (size=70) 2024-11-22T06:02:12,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741847_1023 (size=70) 2024-11-22T06:02:12,498 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,,1732255330549.39607123315dab659c600f300343fbef. 
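
The SnapshotProcedure above (pid=12) walks SNAPSHOT_PREPARE through SNAPSHOT_SNAPSHOT_ONLINE_REGIONS for the request '{ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }', fanning out one SnapshotRegionProcedure per region. On the client side this whole flow is driven by a single Admin.snapshot call; a minimal sketch follows, assuming the standard HBase 2.x+ API (the connection boilerplate is generic, not from this test).

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class SnapshotSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            // FLUSH-type snapshot, matching the 'type=FLUSH ttl=0' request in the log.
            admin.snapshot("emptySnaptb0-testExportWithTargetName",
                TableName.valueOf("testtb-testExportWithTargetName"),
                SnapshotType.FLUSH);
        }
    }
}
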
2024-11-22T06:02:12,501 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-11-22T06:02:12,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.HMaster(4169): Remote procedure done, pid=13 2024-11-22T06:02:12,504 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithTargetName on region 39607123315dab659c600f300343fbef 2024-11-22T06:02:12,505 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=13, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 39607123315dab659c600f300343fbef 2024-11-22T06:02:12,521 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=12, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 39607123315dab659c600f300343fbef in 281 msec 2024-11-22T06:02:12,861 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,1,1732255330549.c8c8ffa80b9454c9f2701d6367bc0c99. 2024-11-22T06:02:12,862 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-11-22T06:02:12,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-11-22T06:02:12,863 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithTargetName on region c8c8ffa80b9454c9f2701d6367bc0c99 2024-11-22T06:02:12,863 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c8c8ffa80b9454c9f2701d6367bc0c99 2024-11-22T06:02:12,873 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=12 2024-11-22T06:02:12,873 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-22T06:02:12,873 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=12, state=SUCCESS, hasLock=false; SnapshotRegionProcedure c8c8ffa80b9454c9f2701d6367bc0c99 in 639 msec 2024-11-22T06:02:12,876 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-22T06:02:12,881 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-22T06:02:12,888 DEBUG 
[PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithTargetName 2024-11-22T06:02:12,892 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithTargetName 2024-11-22T06:02:12,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-11-22T06:02:12,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741848_1024 (size=549) 2024-11-22T06:02:12,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741848_1024 (size=549) 2024-11-22T06:02:12,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741848_1024 (size=549) 2024-11-22T06:02:12,962 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-22T06:02:12,997 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-22T06:02:12,998 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithTargetName to hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/emptySnaptb0-testExportWithTargetName 2024-11-22T06:02:13,002 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-22T06:02:13,002 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 12 2024-11-22T06:02:13,009 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } in 1.2280 sec 2024-11-22T06:02:13,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-11-22T06:02:13,930 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithTargetName completed 2024-11-22T06:02:13,950 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] 
client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithTargetName', row='07b64985f8ed69450aa715edc42eabfa0', locateType=CURRENT is [region=testtb-testExportWithTargetName,,1732255330549.39607123315dab659c600f300343fbef., hostname=407411ea95db,44619,1732255321573, seqNum=2] 2024-11-22T06:02:13,959 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithTargetName', row='192af014312e3c89d1ed08a299b377746', locateType=CURRENT is [region=testtb-testExportWithTargetName,1,1732255330549.c8c8ffa80b9454c9f2701d6367bc0c99., hostname=407411ea95db,35283,1732255321784, seqNum=2] 2024-11-22T06:02:13,962 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithTargetName', row='20a5772d66b29ce973212a49b501fac43', locateType=CURRENT is [region=testtb-testExportWithTargetName,1,1732255330549.c8c8ffa80b9454c9f2701d6367bc0c99., hostname=407411ea95db,35283,1732255321784, seqNum=2] 2024-11-22T06:02:13,964 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithTargetName', row='34c264526a80fa0d6c6b099c26ad0cfcc', locateType=CURRENT is [region=testtb-testExportWithTargetName,1,1732255330549.c8c8ffa80b9454c9f2701d6367bc0c99., hostname=407411ea95db,35283,1732255321784, seqNum=2] 2024-11-22T06:02:13,966 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithTargetName', row='4570c91ee64ee4dc3f3cb7cbbaca15bcd', locateType=CURRENT is [region=testtb-testExportWithTargetName,1,1732255330549.c8c8ffa80b9454c9f2701d6367bc0c99., hostname=407411ea95db,35283,1732255321784, seqNum=2] 2024-11-22T06:02:13,968 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithTargetName', row='5084d0a391c55a579a68e031fbe51849a', locateType=CURRENT is [region=testtb-testExportWithTargetName,1,1732255330549.c8c8ffa80b9454c9f2701d6367bc0c99., hostname=407411ea95db,35283,1732255321784, seqNum=2] 2024-11-22T06:02:13,971 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T06:02:13,978 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T06:02:13,979 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46062, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T06:02:13,980 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44619 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithTargetName,,1732255330549.39607123315dab659c600f300343fbef. with WAL disabled. Data may be lost in the event of a crash. 2024-11-22T06:02:13,982 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43424, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T06:02:13,989 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35283 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithTargetName,1,1732255330549.c8c8ffa80b9454c9f2701d6367bc0c99. with WAL disabled. Data may be lost in the event of a crash. 
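
The two 'writing data to region ... with WAL disabled' messages above come from the test loading rows (keys such as '07b64985...' through '5084d0a3...') while skipping the write-ahead log, which is why the server warns that data may be lost on a crash. A hedged sketch of a put issued that way is shown below; the row, qualifier and value are made-up placeholders, and only the Durability.SKIP_WAL setting mirrors what the log reports.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalPutSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("testtb-testExportWithTargetName"))) {
            Put put = new Put(Bytes.toBytes("row-placeholder"));        // hypothetical row key
            put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"),      // 'cf' family from the log
                Bytes.toBytes("value"));
            put.setDurability(Durability.SKIP_WAL); // the "WAL disabled" path noted above
            table.put(put);
        }
    }
}
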
2024-11-22T06:02:13,993 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-22T06:02:14,006 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithTargetName 2024-11-22T06:02:14,007 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithTargetName,,1732255330549.39607123315dab659c600f300343fbef. 2024-11-22T06:02:14,008 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-22T06:02:14,013 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-22T06:02:14,032 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-22T06:02:14,058 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-22T06:02:14,064 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-11-22T06:02:14,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732255334065 (current time:1732255334065). 
2024-11-22T06:02:14,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-22T06:02:14,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithTargetName VERSION not specified, setting to 2 2024-11-22T06:02:14,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-22T06:02:14,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@397862df, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:02:14,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.ClusterIdFetcher(90): Going to request 407411ea95db,40429,-1 for getting cluster id 2024-11-22T06:02:14,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T06:02:14,070 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '6eeb2921-6f20-4de7-bcc0-81ebecd4007b' 2024-11-22T06:02:14,071 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T06:02:14,071 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "6eeb2921-6f20-4de7-bcc0-81ebecd4007b" 2024-11-22T06:02:14,071 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@13dcabef, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:02:14,071 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [407411ea95db,40429,-1] 2024-11-22T06:02:14,072 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T06:02:14,072 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:02:14,076 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44774, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T06:02:14,078 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5e782c5c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:02:14,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T06:02:14,082 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] 
client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=407411ea95db,41799,1732255321718, seqNum=-1] 2024-11-22T06:02:14,082 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T06:02:14,084 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41774, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T06:02:14,086 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429. 2024-11-22T06:02:14,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-22T06:02:14,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:02:14,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:02:14,087 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-22T06:02:14,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4f6054f0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:02:14,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.ClusterIdFetcher(90): Going to request 407411ea95db,40429,-1 for getting cluster id 2024-11-22T06:02:14,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T06:02:14,090 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '6eeb2921-6f20-4de7-bcc0-81ebecd4007b' 2024-11-22T06:02:14,090 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T06:02:14,091 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "6eeb2921-6f20-4de7-bcc0-81ebecd4007b" 2024-11-22T06:02:14,091 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1ef69f38, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:02:14,091 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [407411ea95db,40429,-1] 2024-11-22T06:02:14,091 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T06:02:14,091 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:02:14,092 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44792, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T06:02:14,093 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@55132715, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:02:14,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T06:02:14,095 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=407411ea95db,41799,1732255321718, seqNum=-1] 2024-11-22T06:02:14,095 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T06:02:14,096 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41776, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
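
Each of the short-lived connections above repeats the same handshake: fetch the connection registry, learn cluster_id 6eeb2921-6f20-4de7-bcc0-81ebecd4007b, locate hbase:meta on 407411ea95db,41799, then stop the RPC client again. If only the cluster id is needed programmatically, the Admin API exposes it through the cluster metrics; a small sketch follows, assuming the standard 2.x+ client (nothing here is taken from the test itself).

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ClusterIdSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            // The same identifier the ClusterIdFetcher log lines above resolve via the registry.
            System.out.println(admin.getClusterMetrics().getClusterId());
        }
    }
}
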
2024-11-22T06:02:14,098 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithTargetName', locateType=CURRENT is [region=hbase:acl,,1732255324607.c2a1af531af5dfa747044be7409c1d27., hostname=407411ea95db,41799,1732255321718, seqNum=2]
2024-11-22T06:02:14,101 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429.
2024-11-22T06:02:14,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.AsyncConnectionImpl(264): Call stack:
  at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
  at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
  at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
  at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
  at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
  at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134)
  at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522)
  at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485)
  at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490)
  at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487)
  at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
  at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
  at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
  at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555)
  at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535)
  at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
  at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
  at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
  at java.base/java.lang.reflect.Method.invoke(Method.java:568)
  at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39)
  at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174)
  at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487)
  at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354)
  at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767)
  at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java)
  at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
  at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
  at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
  at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-11-22T06:02:14,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-22T06:02:14,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-22T06:02:14,101 INFO [Registry-endpoints-refresh-end-points {}]
client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-22T06:02:14,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-11-22T06:02:14,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-22T06:02:14,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] procedure2.ProcedureExecutor(1139): Stored pid=15, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-11-22T06:02:14,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 15 2024-11-22T06:02:14,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-11-22T06:02:14,105 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-22T06:02:14,107 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-22T06:02:14,110 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-22T06:02:14,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741849_1025 (size=162) 2024-11-22T06:02:14,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741849_1025 (size=162) 2024-11-22T06:02:14,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741849_1025 (size=162) 2024-11-22T06:02:14,153 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-22T06:02:14,153 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=16, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 39607123315dab659c600f300343fbef}, {pid=17, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c8c8ffa80b9454c9f2701d6367bc0c99}] 2024-11-22T06:02:14,159 INFO [PEWorker-4 {}] 
procedure.MasterProcedureScheduler(851): Took xlock for pid=17, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c8c8ffa80b9454c9f2701d6367bc0c99 2024-11-22T06:02:14,160 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=16, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 39607123315dab659c600f300343fbef 2024-11-22T06:02:14,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-11-22T06:02:14,312 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35283 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=17 2024-11-22T06:02:14,312 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44619 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=16 2024-11-22T06:02:14,317 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,1,1732255330549.c8c8ffa80b9454c9f2701d6367bc0c99. 2024-11-22T06:02:14,322 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(2902): Flushing c8c8ffa80b9454c9f2701d6367bc0c99 1/1 column families, dataSize=3.19 KB heapSize=7.14 KB 2024-11-22T06:02:14,325 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,,1732255330549.39607123315dab659c600f300343fbef. 
2024-11-22T06:02:14,325 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(2902): Flushing 39607123315dab659c600f300343fbef 1/1 column families, dataSize=65 B heapSize=400 B 2024-11-22T06:02:14,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-11-22T06:02:14,445 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithTargetName/39607123315dab659c600f300343fbef/.tmp/cf/6c34273e7aa94037a9bfd7362c684e8c is 69, key is 07b64985f8ed69450aa715edc42eabfa0/cf:q/1732255333979/Put/seqid=0 2024-11-22T06:02:14,450 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithTargetName/c8c8ffa80b9454c9f2701d6367bc0c99/.tmp/cf/a12160216bdc45a29ff195023dc6e3b5 is 71, key is 1323ed9c9fea969130faaf286c9d91b9/cf:q/1732255333988/Put/seqid=0 2024-11-22T06:02:14,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741850_1026 (size=5149) 2024-11-22T06:02:14,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741850_1026 (size=5149) 2024-11-22T06:02:14,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741850_1026 (size=5149) 2024-11-22T06:02:14,511 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithTargetName/39607123315dab659c600f300343fbef/.tmp/cf/6c34273e7aa94037a9bfd7362c684e8c 2024-11-22T06:02:14,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741851_1027 (size=8460) 2024-11-22T06:02:14,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741851_1027 (size=8460) 2024-11-22T06:02:14,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741851_1027 (size=8460) 2024-11-22T06:02:14,553 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.19 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithTargetName/c8c8ffa80b9454c9f2701d6367bc0c99/.tmp/cf/a12160216bdc45a29ff195023dc6e3b5 2024-11-22T06:02:14,633 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithTargetName/39607123315dab659c600f300343fbef/.tmp/cf/6c34273e7aa94037a9bfd7362c684e8c as hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithTargetName/39607123315dab659c600f300343fbef/cf/6c34273e7aa94037a9bfd7362c684e8c 2024-11-22T06:02:14,633 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithTargetName/c8c8ffa80b9454c9f2701d6367bc0c99/.tmp/cf/a12160216bdc45a29ff195023dc6e3b5 as hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithTargetName/c8c8ffa80b9454c9f2701d6367bc0c99/cf/a12160216bdc45a29ff195023dc6e3b5 2024-11-22T06:02:14,654 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithTargetName/c8c8ffa80b9454c9f2701d6367bc0c99/cf/a12160216bdc45a29ff195023dc6e3b5, entries=49, sequenceid=6, filesize=8.3 K 2024-11-22T06:02:14,655 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithTargetName/39607123315dab659c600f300343fbef/cf/6c34273e7aa94037a9bfd7362c684e8c, entries=1, sequenceid=6, filesize=5.0 K 2024-11-22T06:02:14,674 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(3140): Finished flush of dataSize ~65 B/65, heapSize ~384 B/384, currentSize=0 B/0 for 39607123315dab659c600f300343fbef in 338ms, sequenceid=6, compaction requested=false 2024-11-22T06:02:14,674 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithTargetName' 2024-11-22T06:02:14,676 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(2603): Flush status journal for 39607123315dab659c600f300343fbef: 2024-11-22T06:02:14,676 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,,1732255330549.39607123315dab659c600f300343fbef. for snaptb0-testExportWithTargetName completed. 2024-11-22T06:02:14,676 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,,1732255330549.39607123315dab659c600f300343fbef.' 
region-info for snapshot=snaptb0-testExportWithTargetName 2024-11-22T06:02:14,676 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-22T06:02:14,676 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithTargetName/39607123315dab659c600f300343fbef/cf/6c34273e7aa94037a9bfd7362c684e8c] hfiles 2024-11-22T06:02:14,679 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(3140): Finished flush of dataSize ~3.19 KB/3271, heapSize ~7.13 KB/7296, currentSize=0 B/0 for c8c8ffa80b9454c9f2701d6367bc0c99 in 349ms, sequenceid=6, compaction requested=false 2024-11-22T06:02:14,679 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithTargetName/39607123315dab659c600f300343fbef/cf/6c34273e7aa94037a9bfd7362c684e8c for snapshot=snaptb0-testExportWithTargetName 2024-11-22T06:02:14,679 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(2603): Flush status journal for c8c8ffa80b9454c9f2701d6367bc0c99: 2024-11-22T06:02:14,680 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,1,1732255330549.c8c8ffa80b9454c9f2701d6367bc0c99. for snaptb0-testExportWithTargetName completed. 2024-11-22T06:02:14,680 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,1,1732255330549.c8c8ffa80b9454c9f2701d6367bc0c99.' 
region-info for snapshot=snaptb0-testExportWithTargetName 2024-11-22T06:02:14,680 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-22T06:02:14,680 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithTargetName/c8c8ffa80b9454c9f2701d6367bc0c99/cf/a12160216bdc45a29ff195023dc6e3b5] hfiles 2024-11-22T06:02:14,680 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithTargetName/c8c8ffa80b9454c9f2701d6367bc0c99/cf/a12160216bdc45a29ff195023dc6e3b5 for snapshot=snaptb0-testExportWithTargetName 2024-11-22T06:02:14,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741852_1028 (size=109) 2024-11-22T06:02:14,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741852_1028 (size=109) 2024-11-22T06:02:14,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741852_1028 (size=109) 2024-11-22T06:02:14,728 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,,1732255330549.39607123315dab659c600f300343fbef. 
2024-11-22T06:02:14,729 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=16 2024-11-22T06:02:14,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.HMaster(4169): Remote procedure done, pid=16 2024-11-22T06:02:14,730 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithTargetName on region 39607123315dab659c600f300343fbef 2024-11-22T06:02:14,730 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=16, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 39607123315dab659c600f300343fbef 2024-11-22T06:02:14,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-11-22T06:02:14,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741853_1029 (size=109) 2024-11-22T06:02:14,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741853_1029 (size=109) 2024-11-22T06:02:14,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741853_1029 (size=109) 2024-11-22T06:02:14,745 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,1,1732255330549.c8c8ffa80b9454c9f2701d6367bc0c99. 2024-11-22T06:02:14,746 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-11-22T06:02:14,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.HMaster(4169): Remote procedure done, pid=17 2024-11-22T06:02:14,747 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithTargetName on region c8c8ffa80b9454c9f2701d6367bc0c99 2024-11-22T06:02:14,747 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=16, ppid=15, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 39607123315dab659c600f300343fbef in 579 msec 2024-11-22T06:02:14,747 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=17, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c8c8ffa80b9454c9f2701d6367bc0c99 2024-11-22T06:02:14,756 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=17, resume processing ppid=15 2024-11-22T06:02:14,756 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=17, ppid=15, state=SUCCESS, hasLock=false; SnapshotRegionProcedure c8c8ffa80b9454c9f2701d6367bc0c99 in 597 msec 2024-11-22T06:02:14,756 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-22T06:02:14,758 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=15, 
state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-22T06:02:14,760 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-22T06:02:14,760 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithTargetName 2024-11-22T06:02:14,761 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/.tmp/snaptb0-testExportWithTargetName 2024-11-22T06:02:14,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741854_1030 (size=627) 2024-11-22T06:02:14,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741854_1030 (size=627) 2024-11-22T06:02:14,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741854_1030 (size=627) 2024-11-22T06:02:14,823 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-22T06:02:14,846 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-22T06:02:14,847 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/.tmp/snaptb0-testExportWithTargetName to hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/snaptb0-testExportWithTargetName 2024-11-22T06:02:14,860 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-22T06:02:14,860 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 15 2024-11-22T06:02:14,870 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=15, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } in 758 msec 
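The records above trace the master-side SnapshotProcedure (pid=15) for a FLUSH snapshot through its states, from SNAPSHOT_PREPARE to SNAPSHOT_POST_OPERATION. For orientation only, such a procedure is typically driven by a client request through the HBase Admin API; the sketch below is not this test's code, and the connection setup and class name are assumptions.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class TakeFlushSnapshot {  // hypothetical driver class, not part of HBase
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Ask the master for a FLUSH-type snapshot; on the server this runs
          // the SnapshotProcedure state machine traced in the log above.
          admin.snapshot("snaptb0-testExportWithTargetName",
              TableName.valueOf("testtb-testExportWithTargetName"),
              SnapshotType.FLUSH);
        }
      }
    }

The blocking call returns once the procedure reaches SUCCESS, which is why the client-side "Operation: SNAPSHOT ... completed" message appears only after "Finished pid=15" in the log.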
2024-11-22T06:02:15,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-11-22T06:02:15,240 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithTargetName completed 2024-11-22T06:02:15,241 INFO [Time-limited test {}] snapshot.TestExportSnapshot(467): HDFS export destination path: hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/export-test/export-1732255335240 2024-11-22T06:02:15,241 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=hdfs://localhost:33847, tgtDir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/export-test/export-1732255335240, rawTgtDir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/export-test/export-1732255335240, srcFsUri=hdfs://localhost:33847, srcDir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788 2024-11-22T06:02:15,281 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:33847, inputRoot=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788 2024-11-22T06:02:15,281 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-2145612548_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/export-test/export-1732255335240, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/export-test/export-1732255335240/.hbase-snapshot/.tmp/testExportWithTargetName 2024-11-22T06:02:15,286 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 
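From this point the log follows the ExportSnapshot MapReduce job, which copies the snapshot manifest and hfiles to the export destination and renames the snapshot to the target name seen later in the log. Outside this test harness an equivalent export is usually launched through the ExportSnapshot tool; the sketch below is an assumption-laden illustration (the destination URI and class name are placeholders, and the flags are the ones documented for the tool), not a reproduction of the test's invocation.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class RunSnapshotExport {  // hypothetical driver class, not part of HBase
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Roughly equivalent to the documented command line:
        //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
        //     -snapshot snaptb0-testExportWithTargetName \
        //     -copy-to hdfs://backup-host:8020/hbase -target testExportWithTargetName
        int exitCode = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testExportWithTargetName",
            "-copy-to", "hdfs://backup-host:8020/hbase",  // placeholder destination root
            "-target", "testExportWithTargetName"
        });
        System.exit(exitCode);
      }
    }

The tool runs as a MapReduce job, which is why the remainder of the log shows dependency jars being staged, YARN container activity, and finally the "Finalize the Snapshot Export" and verification messages.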
2024-11-22T06:02:15,296 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/snaptb0-testExportWithTargetName to hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/export-test/export-1732255335240/.hbase-snapshot/.tmp/testExportWithTargetName 2024-11-22T06:02:15,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741855_1031 (size=162) 2024-11-22T06:02:15,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741855_1031 (size=162) 2024-11-22T06:02:15,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741855_1031 (size=162) 2024-11-22T06:02:15,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741856_1032 (size=627) 2024-11-22T06:02:15,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741856_1032 (size=627) 2024-11-22T06:02:15,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741856_1032 (size=627) 2024-11-22T06:02:15,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741857_1033 (size=154) 2024-11-22T06:02:15,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741857_1033 (size=154) 2024-11-22T06:02:15,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741857_1033 (size=154) 2024-11-22T06:02:15,386 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:02:15,386 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:02:15,387 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:02:16,590 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-22T06:02:16,703 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/hadoop-12993716427064984576.jar 2024-11-22T06:02:16,704 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:02:16,705 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:02:16,791 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/hadoop-2757220125539115636.jar 2024-11-22T06:02:16,792 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:02:16,793 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:02:16,794 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:02:16,794 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:02:16,795 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:02:16,796 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:02:16,796 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-22T06:02:16,797 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-22T06:02:16,797 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-22T06:02:16,798 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-22T06:02:16,799 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-22T06:02:16,800 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-22T06:02:16,800 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-22T06:02:16,801 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-22T06:02:16,801 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-22T06:02:16,801 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-22T06:02:16,802 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-22T06:02:16,804 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-22T06:02:16,805 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-22T06:02:16,805 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-22T06:02:16,806 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-22T06:02:16,806 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-22T06:02:16,807 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-22T06:02:16,807 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-22T06:02:17,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741858_1034 (size=131440) 2024-11-22T06:02:17,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741858_1034 (size=131440) 2024-11-22T06:02:17,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741858_1034 (size=131440) 2024-11-22T06:02:17,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741859_1035 (size=4188619) 2024-11-22T06:02:17,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741859_1035 (size=4188619) 2024-11-22T06:02:17,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741859_1035 (size=4188619) 2024-11-22T06:02:17,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741860_1036 (size=1323991) 2024-11-22T06:02:17,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741860_1036 (size=1323991) 2024-11-22T06:02:17,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741860_1036 (size=1323991) 2024-11-22T06:02:17,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741861_1037 (size=903736) 2024-11-22T06:02:17,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741861_1037 (size=903736) 2024-11-22T06:02:17,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741861_1037 (size=903736) 2024-11-22T06:02:17,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741862_1038 (size=8360083) 2024-11-22T06:02:17,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741862_1038 (size=8360083) 2024-11-22T06:02:17,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to 
blk_1073741862_1038 (size=8360083) 2024-11-22T06:02:17,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741863_1039 (size=1877034) 2024-11-22T06:02:17,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741863_1039 (size=1877034) 2024-11-22T06:02:17,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741863_1039 (size=1877034) 2024-11-22T06:02:17,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741864_1040 (size=77835) 2024-11-22T06:02:17,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741864_1040 (size=77835) 2024-11-22T06:02:17,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741864_1040 (size=77835) 2024-11-22T06:02:17,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741865_1041 (size=30949) 2024-11-22T06:02:17,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741865_1041 (size=30949) 2024-11-22T06:02:17,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741865_1041 (size=30949) 2024-11-22T06:02:17,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741866_1042 (size=1597347) 2024-11-22T06:02:17,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741866_1042 (size=1597347) 2024-11-22T06:02:17,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741866_1042 (size=1597347) 2024-11-22T06:02:17,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741867_1043 (size=4695811) 2024-11-22T06:02:17,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741867_1043 (size=4695811) 2024-11-22T06:02:17,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741867_1043 (size=4695811) 2024-11-22T06:02:17,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741868_1044 (size=440656) 2024-11-22T06:02:17,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741868_1044 (size=440656) 2024-11-22T06:02:17,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741868_1044 (size=440656) 2024-11-22T06:02:17,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741869_1045 (size=232957) 2024-11-22T06:02:17,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is 
added to blk_1073741869_1045 (size=232957) 2024-11-22T06:02:17,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741869_1045 (size=232957) 2024-11-22T06:02:17,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741870_1046 (size=127628) 2024-11-22T06:02:17,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741870_1046 (size=127628) 2024-11-22T06:02:17,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741870_1046 (size=127628) 2024-11-22T06:02:18,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741871_1047 (size=20406) 2024-11-22T06:02:18,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741871_1047 (size=20406) 2024-11-22T06:02:18,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741871_1047 (size=20406) 2024-11-22T06:02:18,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741872_1048 (size=5175431) 2024-11-22T06:02:18,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741872_1048 (size=5175431) 2024-11-22T06:02:18,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741872_1048 (size=5175431) 2024-11-22T06:02:18,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741873_1049 (size=6424744) 2024-11-22T06:02:18,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741873_1049 (size=6424744) 2024-11-22T06:02:18,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741873_1049 (size=6424744) 2024-11-22T06:02:18,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741874_1050 (size=217634) 2024-11-22T06:02:18,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741874_1050 (size=217634) 2024-11-22T06:02:18,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741874_1050 (size=217634) 2024-11-22T06:02:18,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741875_1051 (size=1832290) 2024-11-22T06:02:18,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741875_1051 (size=1832290) 2024-11-22T06:02:18,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741875_1051 (size=1832290) 2024-11-22T06:02:18,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:35893 is added to blk_1073741876_1052 (size=322274) 2024-11-22T06:02:18,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741876_1052 (size=322274) 2024-11-22T06:02:18,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741876_1052 (size=322274) 2024-11-22T06:02:18,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741877_1053 (size=503880) 2024-11-22T06:02:18,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741877_1053 (size=503880) 2024-11-22T06:02:18,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741877_1053 (size=503880) 2024-11-22T06:02:18,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741878_1054 (size=29229) 2024-11-22T06:02:18,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741878_1054 (size=29229) 2024-11-22T06:02:18,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741878_1054 (size=29229) 2024-11-22T06:02:18,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741879_1055 (size=24096) 2024-11-22T06:02:18,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741879_1055 (size=24096) 2024-11-22T06:02:18,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741879_1055 (size=24096) 2024-11-22T06:02:18,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741880_1056 (size=111872) 2024-11-22T06:02:18,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741880_1056 (size=111872) 2024-11-22T06:02:18,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741880_1056 (size=111872) 2024-11-22T06:02:18,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741881_1057 (size=45609) 2024-11-22T06:02:18,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741881_1057 (size=45609) 2024-11-22T06:02:18,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741881_1057 (size=45609) 2024-11-22T06:02:18,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741882_1058 (size=136454) 2024-11-22T06:02:18,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741882_1058 (size=136454) 2024-11-22T06:02:18,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:43345 is added to blk_1073741882_1058 (size=136454) 2024-11-22T06:02:18,767 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-11-22T06:02:18,789 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'snaptb0-testExportWithTargetName' hfile list 2024-11-22T06:02:18,800 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(754): export split=0 size=13.3 K 2024-11-22T06:02:18,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741883_1059 (size=342) 2024-11-22T06:02:18,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741883_1059 (size=342) 2024-11-22T06:02:18,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741883_1059 (size=342) 2024-11-22T06:02:18,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741884_1060 (size=15) 2024-11-22T06:02:18,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741884_1060 (size=15) 2024-11-22T06:02:18,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741884_1060 (size=15) 2024-11-22T06:02:19,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741885_1061 (size=303737) 2024-11-22T06:02:19,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741885_1061 (size=303737) 2024-11-22T06:02:19,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741885_1061 (size=303737) 2024-11-22T06:02:19,685 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-22T06:02:19,686 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-22T06:02:20,145 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732255328437_0001_000001 (auth:SIMPLE) from 127.0.0.1:59494 2024-11-22T06:02:28,051 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732255328437_0001_000001 (auth:SIMPLE) from 127.0.0.1:56872 2024-11-22T06:02:28,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741886_1062 (size=349387) 2024-11-22T06:02:28,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741886_1062 (size=349387) 2024-11-22T06:02:28,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741886_1062 (size=349387) 2024-11-22T06:02:29,720 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-22T06:02:30,505 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732255328437_0001_000001 (auth:SIMPLE) from 127.0.0.1:57084 2024-11-22T06:02:37,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741887_1063 (size=8460) 2024-11-22T06:02:37,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741887_1063 (size=8460) 2024-11-22T06:02:37,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741887_1063 (size=8460) 2024-11-22T06:02:37,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741888_1064 (size=5149) 2024-11-22T06:02:37,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741888_1064 (size=5149) 2024-11-22T06:02:37,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741888_1064 (size=5149) 2024-11-22T06:02:38,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741889_1065 (size=17419) 2024-11-22T06:02:38,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741889_1065 (size=17419) 2024-11-22T06:02:38,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741889_1065 (size=17419) 2024-11-22T06:02:38,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741890_1066 (size=464) 2024-11-22T06:02:38,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741890_1066 (size=464) 2024-11-22T06:02:38,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741890_1066 (size=464) 2024-11-22T06:02:38,117 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741891_1067 (size=17419) 2024-11-22T06:02:38,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741891_1067 (size=17419) 2024-11-22T06:02:38,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741891_1067 (size=17419) 2024-11-22T06:02:38,118 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1754703613/yarn-5946224054/MiniMRCluster_1754703613-localDir-nm-1_0/usercache/jenkins/appcache/application_1732255328437_0001/container_1732255328437_0001_01_000002/launch_container.sh] 2024-11-22T06:02:38,118 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1754703613/yarn-5946224054/MiniMRCluster_1754703613-localDir-nm-1_0/usercache/jenkins/appcache/application_1732255328437_0001/container_1732255328437_0001_01_000002/container_tokens] 2024-11-22T06:02:38,118 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1754703613/yarn-5946224054/MiniMRCluster_1754703613-localDir-nm-1_0/usercache/jenkins/appcache/application_1732255328437_0001/container_1732255328437_0001_01_000002/sysfs] 2024-11-22T06:02:38,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741892_1068 (size=349387) 2024-11-22T06:02:38,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741892_1068 (size=349387) 2024-11-22T06:02:38,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741892_1068 (size=349387) 2024-11-22T06:02:40,059 INFO [Time-limited test {}] snapshot.ExportSnapshot(1210): Finalize the Snapshot Export 2024-11-22T06:02:40,060 INFO [Time-limited test {}] snapshot.ExportSnapshot(1221): Verify the exported snapshot's expiration status and integrity. 
2024-11-22T06:02:40,078 INFO [Time-limited test {}] snapshot.ExportSnapshot(1227): Export Completed: testExportWithTargetName 2024-11-22T06:02:40,078 INFO [Time-limited test {}] snapshot.TestExportSnapshot(361): Exported snapshot 2024-11-22T06:02:40,079 INFO [Time-limited test {}] snapshot.TestExportSnapshot(372): Verified filesystem state 2024-11-22T06:02:40,079 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-2145612548_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/snaptb0-testExportWithTargetName at hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/snaptb0-testExportWithTargetName 2024-11-22T06:02:40,080 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/snaptb0-testExportWithTargetName/.snapshotinfo 2024-11-22T06:02:40,080 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/snaptb0-testExportWithTargetName/data.manifest 2024-11-22T06:02:40,080 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-2145612548_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/export-test/export-1732255335240/.hbase-snapshot/testExportWithTargetName at hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/export-test/export-1732255335240/.hbase-snapshot/testExportWithTargetName 2024-11-22T06:02:40,081 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/export-test/export-1732255335240/.hbase-snapshot/testExportWithTargetName/.snapshotinfo 2024-11-22T06:02:40,081 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/export-test/export-1732255335240/.hbase-snapshot/testExportWithTargetName/data.manifest 2024-11-22T06:02:40,097 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportWithTargetName 2024-11-22T06:02:40,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] procedure2.ProcedureExecutor(1139): Stored pid=18, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithTargetName 2024-11-22T06:02:40,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-11-22T06:02:40,107 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732255360107"}]},"ts":"1732255360107"} 2024-11-22T06:02:40,109 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=DISABLING in hbase:meta 2024-11-22T06:02:40,109 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithTargetName to state=DISABLING 2024-11-22T06:02:40,112 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized 
subprocedures=[{pid=19, ppid=18, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithTargetName}] 2024-11-22T06:02:40,118 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=20, ppid=19, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=39607123315dab659c600f300343fbef, UNASSIGN}, {pid=21, ppid=19, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=c8c8ffa80b9454c9f2701d6367bc0c99, UNASSIGN}] 2024-11-22T06:02:40,119 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=20, ppid=19, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=39607123315dab659c600f300343fbef, UNASSIGN 2024-11-22T06:02:40,119 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=21, ppid=19, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=c8c8ffa80b9454c9f2701d6367bc0c99, UNASSIGN 2024-11-22T06:02:40,122 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=20 updating hbase:meta row=39607123315dab659c600f300343fbef, regionState=CLOSING, regionLocation=407411ea95db,44619,1732255321573 2024-11-22T06:02:40,122 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=21 updating hbase:meta row=c8c8ffa80b9454c9f2701d6367bc0c99, regionState=CLOSING, regionLocation=407411ea95db,35283,1732255321784 2024-11-22T06:02:40,125 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=20, ppid=19, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=39607123315dab659c600f300343fbef, UNASSIGN because future has completed 2024-11-22T06:02:40,126 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-22T06:02:40,126 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=22, ppid=20, state=RUNNABLE, hasLock=false; CloseRegionProcedure 39607123315dab659c600f300343fbef, server=407411ea95db,44619,1732255321573}] 2024-11-22T06:02:40,127 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=21, ppid=19, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=c8c8ffa80b9454c9f2701d6367bc0c99, UNASSIGN because future has completed 2024-11-22T06:02:40,130 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-22T06:02:40,130 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=23, ppid=21, state=RUNNABLE, hasLock=false; CloseRegionProcedure c8c8ffa80b9454c9f2701d6367bc0c99, server=407411ea95db,35283,1732255321784}] 2024-11-22T06:02:40,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-11-22T06:02:40,284 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] 
handler.UnassignRegionHandler(122): Close 39607123315dab659c600f300343fbef 2024-11-22T06:02:40,285 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(122): Close c8c8ffa80b9454c9f2701d6367bc0c99 2024-11-22T06:02:40,285 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-22T06:02:40,286 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-22T06:02:40,286 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1722): Closing c8c8ffa80b9454c9f2701d6367bc0c99, disabling compactions & flushes 2024-11-22T06:02:40,286 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] regionserver.HRegion(1722): Closing 39607123315dab659c600f300343fbef, disabling compactions & flushes 2024-11-22T06:02:40,286 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,1,1732255330549.c8c8ffa80b9454c9f2701d6367bc0c99. 2024-11-22T06:02:40,286 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,,1732255330549.39607123315dab659c600f300343fbef. 2024-11-22T06:02:40,286 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,1,1732255330549.c8c8ffa80b9454c9f2701d6367bc0c99. 2024-11-22T06:02:40,286 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,,1732255330549.39607123315dab659c600f300343fbef. 2024-11-22T06:02:40,286 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,,1732255330549.39607123315dab659c600f300343fbef. after waiting 0 ms 2024-11-22T06:02:40,286 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,1,1732255330549.c8c8ffa80b9454c9f2701d6367bc0c99. after waiting 0 ms 2024-11-22T06:02:40,286 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,,1732255330549.39607123315dab659c600f300343fbef. 2024-11-22T06:02:40,286 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,1,1732255330549.c8c8ffa80b9454c9f2701d6367bc0c99. 
2024-11-22T06:02:40,298 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithTargetName/39607123315dab659c600f300343fbef/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-22T06:02:40,301 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithTargetName/c8c8ffa80b9454c9f2701d6367bc0c99/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-22T06:02:40,302 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-22T06:02:40,302 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-22T06:02:40,302 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,1,1732255330549.c8c8ffa80b9454c9f2701d6367bc0c99. 2024-11-22T06:02:40,302 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,,1732255330549.39607123315dab659c600f300343fbef. 2024-11-22T06:02:40,303 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1676): Region close journal for c8c8ffa80b9454c9f2701d6367bc0c99: Waiting for close lock at 1732255360286Running coprocessor pre-close hooks at 1732255360286Disabling compacts and flushes for region at 1732255360286Disabling writes for close at 1732255360286Writing region close event to WAL at 1732255360292 (+6 ms)Running coprocessor post-close hooks at 1732255360302 (+10 ms)Closed at 1732255360302 2024-11-22T06:02:40,303 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] regionserver.HRegion(1676): Region close journal for 39607123315dab659c600f300343fbef: Waiting for close lock at 1732255360286Running coprocessor pre-close hooks at 1732255360286Disabling compacts and flushes for region at 1732255360286Disabling writes for close at 1732255360286Writing region close event to WAL at 1732255360288 (+2 ms)Running coprocessor post-close hooks at 1732255360300 (+12 ms)Closed at 1732255360302 (+2 ms) 2024-11-22T06:02:40,306 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] handler.UnassignRegionHandler(157): Closed 39607123315dab659c600f300343fbef 2024-11-22T06:02:40,308 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=20 updating hbase:meta row=39607123315dab659c600f300343fbef, regionState=CLOSED 2024-11-22T06:02:40,313 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=22, ppid=20, state=RUNNABLE, hasLock=false; CloseRegionProcedure 39607123315dab659c600f300343fbef, server=407411ea95db,44619,1732255321573 because future has completed 2024-11-22T06:02:40,315 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(157): 
Closed c8c8ffa80b9454c9f2701d6367bc0c99 2024-11-22T06:02:40,317 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=21 updating hbase:meta row=c8c8ffa80b9454c9f2701d6367bc0c99, regionState=CLOSED 2024-11-22T06:02:40,321 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=23, ppid=21, state=RUNNABLE, hasLock=false; CloseRegionProcedure c8c8ffa80b9454c9f2701d6367bc0c99, server=407411ea95db,35283,1732255321784 because future has completed 2024-11-22T06:02:40,323 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=22, resume processing ppid=20 2024-11-22T06:02:40,323 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=22, ppid=20, state=SUCCESS, hasLock=false; CloseRegionProcedure 39607123315dab659c600f300343fbef, server=407411ea95db,44619,1732255321573 in 191 msec 2024-11-22T06:02:40,326 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=20, ppid=19, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=39607123315dab659c600f300343fbef, UNASSIGN in 205 msec 2024-11-22T06:02:40,326 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=23, resume processing ppid=21 2024-11-22T06:02:40,327 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=23, ppid=21, state=SUCCESS, hasLock=false; CloseRegionProcedure c8c8ffa80b9454c9f2701d6367bc0c99, server=407411ea95db,35283,1732255321784 in 192 msec 2024-11-22T06:02:40,330 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=21, resume processing ppid=19 2024-11-22T06:02:40,330 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=21, ppid=19, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=c8c8ffa80b9454c9f2701d6367bc0c99, UNASSIGN in 208 msec 2024-11-22T06:02:40,335 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=19, resume processing ppid=18 2024-11-22T06:02:40,335 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=19, ppid=18, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithTargetName in 219 msec 2024-11-22T06:02:40,337 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732255360337"}]},"ts":"1732255360337"} 2024-11-22T06:02:40,339 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=DISABLED in hbase:meta 2024-11-22T06:02:40,339 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithTargetName to state=DISABLED 2024-11-22T06:02:40,343 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=18, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithTargetName in 242 msec 2024-11-22T06:02:40,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-11-22T06:02:40,420 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithTargetName completed 2024-11-22T06:02:40,424 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.HMaster$5(2570): 
Client=jenkins//172.17.0.2 delete testtb-testExportWithTargetName 2024-11-22T06:02:40,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] procedure2.ProcedureExecutor(1139): Stored pid=24, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-22T06:02:40,433 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=24, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-22T06:02:40,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithTargetName 2024-11-22T06:02:40,436 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=24, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-22T06:02:40,439 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41799 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithTargetName 2024-11-22T06:02:40,443 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithTargetName/39607123315dab659c600f300343fbef 2024-11-22T06:02:40,443 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithTargetName/c8c8ffa80b9454c9f2701d6367bc0c99 2024-11-22T06:02:40,447 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithTargetName/39607123315dab659c600f300343fbef/cf, FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithTargetName/39607123315dab659c600f300343fbef/recovered.edits] 2024-11-22T06:02:40,447 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithTargetName/c8c8ffa80b9454c9f2701d6367bc0c99/cf, FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithTargetName/c8c8ffa80b9454c9f2701d6367bc0c99/recovered.edits] 2024-11-22T06:02:40,454 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithTargetName/c8c8ffa80b9454c9f2701d6367bc0c99/cf/a12160216bdc45a29ff195023dc6e3b5 to hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/archive/data/default/testtb-testExportWithTargetName/c8c8ffa80b9454c9f2701d6367bc0c99/cf/a12160216bdc45a29ff195023dc6e3b5 2024-11-22T06:02:40,454 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithTargetName/39607123315dab659c600f300343fbef/cf/6c34273e7aa94037a9bfd7362c684e8c to 
hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/archive/data/default/testtb-testExportWithTargetName/39607123315dab659c600f300343fbef/cf/6c34273e7aa94037a9bfd7362c684e8c 2024-11-22T06:02:40,458 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithTargetName/c8c8ffa80b9454c9f2701d6367bc0c99/recovered.edits/9.seqid to hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/archive/data/default/testtb-testExportWithTargetName/c8c8ffa80b9454c9f2701d6367bc0c99/recovered.edits/9.seqid 2024-11-22T06:02:40,458 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithTargetName/39607123315dab659c600f300343fbef/recovered.edits/9.seqid to hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/archive/data/default/testtb-testExportWithTargetName/39607123315dab659c600f300343fbef/recovered.edits/9.seqid 2024-11-22T06:02:40,458 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithTargetName/c8c8ffa80b9454c9f2701d6367bc0c99 2024-11-22T06:02:40,458 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithTargetName/39607123315dab659c600f300343fbef 2024-11-22T06:02:40,458 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithTargetName regions 2024-11-22T06:02:40,461 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=24, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-22T06:02:40,465 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41799 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-11-22T06:02:40,471 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportWithTargetName from hbase:meta 2024-11-22T06:02:40,473 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41799-0x101626c19a90002, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-22T06:02:40,473 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-22T06:02:40,473 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44619-0x101626c19a90001, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-22T06:02:40,473 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35283-0x101626c19a90003, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 
2024-11-22T06:02:40,474 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-11-22T06:02:40,474 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-11-22T06:02:40,474 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-11-22T06:02:40,474 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-11-22T06:02:40,475 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportWithTargetName' descriptor. 2024-11-22T06:02:40,477 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=24, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-22T06:02:40,477 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportWithTargetName' from region states. 2024-11-22T06:02:40,478 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName,,1732255330549.39607123315dab659c600f300343fbef.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732255360477"}]},"ts":"9223372036854775807"} 2024-11-22T06:02:40,478 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName,1,1732255330549.c8c8ffa80b9454c9f2701d6367bc0c99.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732255360477"}]},"ts":"9223372036854775807"} 2024-11-22T06:02:40,481 INFO [PEWorker-3 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-22T06:02:40,481 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 39607123315dab659c600f300343fbef, NAME => 'testtb-testExportWithTargetName,,1732255330549.39607123315dab659c600f300343fbef.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => c8c8ffa80b9454c9f2701d6367bc0c99, NAME => 'testtb-testExportWithTargetName,1,1732255330549.c8c8ffa80b9454c9f2701d6367bc0c99.', STARTKEY => '1', ENDKEY => ''}] 2024-11-22T06:02:40,481 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportWithTargetName' as deleted. 
2024-11-22T06:02:40,481 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-22T06:02:40,481 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44619-0x101626c19a90001, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-22T06:02:40,481 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41799-0x101626c19a90002, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-22T06:02:40,481 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44619-0x101626c19a90001, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:02:40,481 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732255360481"}]},"ts":"9223372036854775807"} 2024-11-22T06:02:40,481 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:02:40,482 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41799-0x101626c19a90002, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:02:40,482 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35283-0x101626c19a90003, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-22T06:02:40,482 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35283-0x101626c19a90003, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:02:40,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=24 2024-11-22T06:02:40,486 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithTargetName state from META 2024-11-22T06:02:40,487 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=24, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-22T06:02:40,489 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=24, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithTargetName in 62 msec 2024-11-22T06:02:40,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=24 2024-11-22T06:02:40,590 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithTargetName 2024-11-22T06:02:40,590 INFO [RPCClient-NioEventLoopGroup-6-4 {}] 
client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithTargetName completed 2024-11-22T06:02:40,604 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithTargetName" type: DISABLED 2024-11-22T06:02:40,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithTargetName 2024-11-22T06:02:40,609 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithTargetName" type: DISABLED 2024-11-22T06:02:40,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithTargetName 2024-11-22T06:02:40,640 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportWithTargetName Thread=774 (was 718) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1055854911_1 at /127.0.0.1:36886 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) 
java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool.commonPool-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2145612548_22 at /127.0.0.1:49972 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38401 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1055854911_1 at /127.0.0.1:49942 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Container metrics unregistration java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: process reaper (pid 111702) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2145612548_22 at /127.0.0.1:57454 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33647 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/407411ea95db:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36595 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/407411ea95db:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/407411ea95db:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool.commonPool-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: ContainersLauncher #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #0 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.read1(BufferedReader.java:213) java.base@17.0.11/java.io.BufferedReader.read(BufferedReader.java:287) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.parseExecResult(Shell.java:1295) app//org.apache.hadoop.util.Shell.runCommand(Shell.java:1054) app//org.apache.hadoop.util.Shell.run(Shell.java:959) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:1282) 
app//org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor.launchContainer(DefaultContainerExecutor.java:349) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.launchContainer(ContainerLaunch.java:600) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:388) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:105) java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2145612548_22 at /127.0.0.1:36920 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1113730973) connection to localhost/127.0.0.1:36595 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DeletionService #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/407411ea95db:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1113730973) connection to localhost/127.0.0.1:33647 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Thread-1258 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=805 (was 780) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=535 (was 388) - SystemLoadAverage LEAK? -, ProcessCount=16 (was 11) - ProcessCount LEAK? -, AvailableMemoryMB=5005 (was 7505) 2024-11-22T06:02:40,641 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=774 is superior to 500 2024-11-22T06:02:40,658 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportWithResetTtl Thread=774, OpenFileDescriptor=805, MaxFileDescriptor=1048576, SystemLoadAverage=535, ProcessCount=16, AvailableMemoryMB=5003 2024-11-22T06:02:40,659 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=774 is superior to 500 2024-11-22T06:02:40,660 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T06:02:40,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] procedure2.ProcedureExecutor(1139): Stored pid=25, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithResetTtl 2024-11-22T06:02:40,664 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=25, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_PRE_OPERATION 2024-11-22T06:02:40,664 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T06:02:40,664 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for 
creating table: namespace: "default" qualifier: "testtb-testExportWithResetTtl" procId is: 25 2024-11-22T06:02:40,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=25 2024-11-22T06:02:40,665 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=25, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-22T06:02:40,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741893_1069 (size=404) 2024-11-22T06:02:40,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741893_1069 (size=404) 2024-11-22T06:02:40,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741893_1069 (size=404) 2024-11-22T06:02:40,682 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 1737f7a70c93ee364e2e424e900dc473, NAME => 'testtb-testExportWithResetTtl,,1732255360660.1737f7a70c93ee364e2e424e900dc473.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788 2024-11-22T06:02:40,683 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 7651663bc25f23dec0bfbfb869dee282, NAME => 'testtb-testExportWithResetTtl,1,1732255360660.7651663bc25f23dec0bfbfb869dee282.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788 2024-11-22T06:02:40,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741894_1070 (size=65) 2024-11-22T06:02:40,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741894_1070 (size=65) 2024-11-22T06:02:40,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741894_1070 (size=65) 2024-11-22T06:02:40,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741895_1071 (size=65) 2024-11-22T06:02:40,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:43345 is added to blk_1073741895_1071 (size=65) 2024-11-22T06:02:40,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741895_1071 (size=65) 2024-11-22T06:02:40,720 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,1,1732255360660.7651663bc25f23dec0bfbfb869dee282.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T06:02:40,720 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,,1732255360660.1737f7a70c93ee364e2e424e900dc473.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T06:02:40,720 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1722): Closing 7651663bc25f23dec0bfbfb869dee282, disabling compactions & flushes 2024-11-22T06:02:40,720 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,1,1732255360660.7651663bc25f23dec0bfbfb869dee282. 2024-11-22T06:02:40,720 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1722): Closing 1737f7a70c93ee364e2e424e900dc473, disabling compactions & flushes 2024-11-22T06:02:40,720 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,1,1732255360660.7651663bc25f23dec0bfbfb869dee282. 2024-11-22T06:02:40,720 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,,1732255360660.1737f7a70c93ee364e2e424e900dc473. 2024-11-22T06:02:40,720 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,1,1732255360660.7651663bc25f23dec0bfbfb869dee282. after waiting 0 ms 2024-11-22T06:02:40,720 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,,1732255360660.1737f7a70c93ee364e2e424e900dc473. 2024-11-22T06:02:40,720 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,1,1732255360660.7651663bc25f23dec0bfbfb869dee282. 2024-11-22T06:02:40,720 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,1,1732255360660.7651663bc25f23dec0bfbfb869dee282. 2024-11-22T06:02:40,720 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,,1732255360660.1737f7a70c93ee364e2e424e900dc473. after waiting 0 ms 2024-11-22T06:02:40,720 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,,1732255360660.1737f7a70c93ee364e2e424e900dc473. 
2024-11-22T06:02:40,720 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1676): Region close journal for 7651663bc25f23dec0bfbfb869dee282: Waiting for close lock at 1732255360720Disabling compacts and flushes for region at 1732255360720Disabling writes for close at 1732255360720Writing region close event to WAL at 1732255360720Closed at 1732255360720 2024-11-22T06:02:40,720 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,,1732255360660.1737f7a70c93ee364e2e424e900dc473. 2024-11-22T06:02:40,720 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1676): Region close journal for 1737f7a70c93ee364e2e424e900dc473: Waiting for close lock at 1732255360720Disabling compacts and flushes for region at 1732255360720Disabling writes for close at 1732255360720Writing region close event to WAL at 1732255360720Closed at 1732255360720 2024-11-22T06:02:40,722 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=25, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_ADD_TO_META 2024-11-22T06:02:40,722 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithResetTtl,1,1732255360660.7651663bc25f23dec0bfbfb869dee282.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1732255360722"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732255360722"}]},"ts":"1732255360722"} 2024-11-22T06:02:40,723 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithResetTtl,,1732255360660.1737f7a70c93ee364e2e424e900dc473.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1732255360722"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732255360722"}]},"ts":"1732255360722"} 2024-11-22T06:02:40,728 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
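For reference, the CreateTableProcedure records above are the server side of an ordinary Admin.createTable() call. The sketch below is illustrative only and is not the code this test actually runs: the class name CreateExportTable is made up, and the single split key '1' is inferred from the two region boundaries ('' to '1' and '1' to '') logged above.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateExportTable {                        // hypothetical driver class, illustration only
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();   // picks up hbase-site.xml from the classpath
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // Single 'cf' family with VERSIONS => '1' and BLOCKSIZE => 65536, mirroring the descriptor logged above.
          TableDescriptor table = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("testtb-testExportWithResetTtl"))
              .setRegionReplication(1)
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                  .setMaxVersions(1)
                  .setBlocksize(65536)
                  .build())
              .build();
          // One split key yields the two regions seen in the log: ['', '1') and ['1', '').
          admin.createTable(table, new byte[][] { Bytes.toBytes("1") });
        }
      }
    }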
2024-11-22T06:02:40,739 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=25, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-22T06:02:40,740 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732255360739"}]},"ts":"1732255360739"} 2024-11-22T06:02:40,745 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=ENABLING in hbase:meta 2024-11-22T06:02:40,745 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {407411ea95db=0} racks are {/default-rack=0} 2024-11-22T06:02:40,747 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-22T06:02:40,747 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-22T06:02:40,747 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-22T06:02:40,747 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-22T06:02:40,747 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-22T06:02:40,748 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-22T06:02:40,748 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-22T06:02:40,748 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-22T06:02:40,748 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-22T06:02:40,748 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-22T06:02:40,748 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=26, ppid=25, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=1737f7a70c93ee364e2e424e900dc473, ASSIGN}, {pid=27, ppid=25, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=7651663bc25f23dec0bfbfb869dee282, ASSIGN}] 2024-11-22T06:02:40,751 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=27, ppid=25, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=7651663bc25f23dec0bfbfb869dee282, ASSIGN 2024-11-22T06:02:40,751 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=26, ppid=25, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=1737f7a70c93ee364e2e424e900dc473, ASSIGN 2024-11-22T06:02:40,753 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=27, ppid=25, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=7651663bc25f23dec0bfbfb869dee282, ASSIGN; state=OFFLINE, location=407411ea95db,41799,1732255321718; forceNewPlan=false, retain=false 2024-11-22T06:02:40,754 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=26, ppid=25, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=1737f7a70c93ee364e2e424e900dc473, ASSIGN; state=OFFLINE, location=407411ea95db,44619,1732255321573; forceNewPlan=false, retain=false 2024-11-22T06:02:40,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=25 2024-11-22T06:02:40,904 INFO [407411ea95db:40429 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-11-22T06:02:40,904 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=26 updating hbase:meta row=1737f7a70c93ee364e2e424e900dc473, regionState=OPENING, regionLocation=407411ea95db,44619,1732255321573 2024-11-22T06:02:40,904 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=27 updating hbase:meta row=7651663bc25f23dec0bfbfb869dee282, regionState=OPENING, regionLocation=407411ea95db,41799,1732255321718 2024-11-22T06:02:40,908 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=27, ppid=25, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=7651663bc25f23dec0bfbfb869dee282, ASSIGN because future has completed 2024-11-22T06:02:40,909 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=28, ppid=27, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7651663bc25f23dec0bfbfb869dee282, server=407411ea95db,41799,1732255321718}] 2024-11-22T06:02:40,916 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=26, ppid=25, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=1737f7a70c93ee364e2e424e900dc473, ASSIGN because future has completed 2024-11-22T06:02:40,917 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=29, ppid=26, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1737f7a70c93ee364e2e424e900dc473, server=407411ea95db,44619,1732255321573}] 2024-11-22T06:02:40,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=25 2024-11-22T06:02:41,068 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] handler.AssignRegionHandler(132): Open testtb-testExportWithResetTtl,1,1732255360660.7651663bc25f23dec0bfbfb869dee282. 2024-11-22T06:02:41,069 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(7752): Opening region: {ENCODED => 7651663bc25f23dec0bfbfb869dee282, NAME => 'testtb-testExportWithResetTtl,1,1732255360660.7651663bc25f23dec0bfbfb869dee282.', STARTKEY => '1', ENDKEY => ''} 2024-11-22T06:02:41,069 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithResetTtl,1,1732255360660.7651663bc25f23dec0bfbfb869dee282. service=AccessControlService 2024-11-22T06:02:41,069 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-22T06:02:41,069 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithResetTtl 7651663bc25f23dec0bfbfb869dee282 2024-11-22T06:02:41,069 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,1,1732255360660.7651663bc25f23dec0bfbfb869dee282.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T06:02:41,069 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(7794): checking encryption for 7651663bc25f23dec0bfbfb869dee282 2024-11-22T06:02:41,069 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(7797): checking classloading for 7651663bc25f23dec0bfbfb869dee282 2024-11-22T06:02:41,071 INFO [StoreOpener-7651663bc25f23dec0bfbfb869dee282-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 7651663bc25f23dec0bfbfb869dee282 2024-11-22T06:02:41,074 INFO [StoreOpener-7651663bc25f23dec0bfbfb869dee282-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7651663bc25f23dec0bfbfb869dee282 columnFamilyName cf 2024-11-22T06:02:41,074 DEBUG [StoreOpener-7651663bc25f23dec0bfbfb869dee282-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T06:02:41,074 INFO [StoreOpener-7651663bc25f23dec0bfbfb869dee282-1 {}] regionserver.HStore(327): Store=7651663bc25f23dec0bfbfb869dee282/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T06:02:41,075 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(1038): replaying wal for 7651663bc25f23dec0bfbfb869dee282 2024-11-22T06:02:41,075 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] handler.AssignRegionHandler(132): Open testtb-testExportWithResetTtl,,1732255360660.1737f7a70c93ee364e2e424e900dc473. 
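The CompactionConfiguration record above lists the store's effective compaction settings. As far as I can tell, the main logged values correspond to the standard configuration keys sketched below; the mapping is offered only as a hint, and values not set here come from other keys or built-in defaults.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionSettingsSketch {                         // hypothetical helper, illustration only
      public static Configuration withLoggedCompactionValues() {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);              // minFilesToCompact:3
        conf.setInt("hbase.hstore.compaction.max", 10);             // maxFilesToCompact:10
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);       // ratio 1.200000
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);   // off-peak ratio 5.000000
        conf.setLong("hbase.hregion.majorcompaction", 604800000L);      // major period (7 days, in ms)
        conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);    // major jitter 0.500000
        return conf;
      }
    }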
2024-11-22T06:02:41,075 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(7752): Opening region: {ENCODED => 1737f7a70c93ee364e2e424e900dc473, NAME => 'testtb-testExportWithResetTtl,,1732255360660.1737f7a70c93ee364e2e424e900dc473.', STARTKEY => '', ENDKEY => '1'} 2024-11-22T06:02:41,075 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithResetTtl/7651663bc25f23dec0bfbfb869dee282 2024-11-22T06:02:41,076 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithResetTtl,,1732255360660.1737f7a70c93ee364e2e424e900dc473. service=AccessControlService 2024-11-22T06:02:41,076 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithResetTtl/7651663bc25f23dec0bfbfb869dee282 2024-11-22T06:02:41,076 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-22T06:02:41,076 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithResetTtl 1737f7a70c93ee364e2e424e900dc473 2024-11-22T06:02:41,076 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,,1732255360660.1737f7a70c93ee364e2e424e900dc473.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T06:02:41,076 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(1048): stopping wal replay for 7651663bc25f23dec0bfbfb869dee282 2024-11-22T06:02:41,076 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(7794): checking encryption for 1737f7a70c93ee364e2e424e900dc473 2024-11-22T06:02:41,076 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(1060): Cleaning up temporary data for 7651663bc25f23dec0bfbfb869dee282 2024-11-22T06:02:41,076 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(7797): checking classloading for 1737f7a70c93ee364e2e424e900dc473 2024-11-22T06:02:41,078 INFO [StoreOpener-1737f7a70c93ee364e2e424e900dc473-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 1737f7a70c93ee364e2e424e900dc473 2024-11-22T06:02:41,078 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(1093): writing seq id for 7651663bc25f23dec0bfbfb869dee282 
2024-11-22T06:02:41,080 INFO [StoreOpener-1737f7a70c93ee364e2e424e900dc473-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1737f7a70c93ee364e2e424e900dc473 columnFamilyName cf 2024-11-22T06:02:41,080 DEBUG [StoreOpener-1737f7a70c93ee364e2e424e900dc473-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T06:02:41,081 INFO [StoreOpener-1737f7a70c93ee364e2e424e900dc473-1 {}] regionserver.HStore(327): Store=1737f7a70c93ee364e2e424e900dc473/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T06:02:41,081 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithResetTtl/7651663bc25f23dec0bfbfb869dee282/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T06:02:41,081 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(1038): replaying wal for 1737f7a70c93ee364e2e424e900dc473 2024-11-22T06:02:41,082 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(1114): Opened 7651663bc25f23dec0bfbfb869dee282; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72257028, jitterRate=0.07671362161636353}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-22T06:02:41,082 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 7651663bc25f23dec0bfbfb869dee282 2024-11-22T06:02:41,082 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithResetTtl/1737f7a70c93ee364e2e424e900dc473 2024-11-22T06:02:41,082 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(1006): Region open journal for 7651663bc25f23dec0bfbfb869dee282: Running coprocessor pre-open hook at 1732255361070Writing region info on filesystem at 1732255361070Initializing all the Stores at 1732255361071 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => 
'65536 B (64KB)'} at 1732255361071Cleaning up temporary data from old regions at 1732255361076 (+5 ms)Running coprocessor post-open hooks at 1732255361082 (+6 ms)Region opened successfully at 1732255361082 2024-11-22T06:02:41,083 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithResetTtl/1737f7a70c93ee364e2e424e900dc473 2024-11-22T06:02:41,083 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(1048): stopping wal replay for 1737f7a70c93ee364e2e424e900dc473 2024-11-22T06:02:41,083 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(1060): Cleaning up temporary data for 1737f7a70c93ee364e2e424e900dc473 2024-11-22T06:02:41,084 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithResetTtl,1,1732255360660.7651663bc25f23dec0bfbfb869dee282., pid=28, masterSystemTime=1732255361064 2024-11-22T06:02:41,085 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(1093): writing seq id for 1737f7a70c93ee364e2e424e900dc473 2024-11-22T06:02:41,086 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithResetTtl,1,1732255360660.7651663bc25f23dec0bfbfb869dee282. 2024-11-22T06:02:41,086 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] handler.AssignRegionHandler(153): Opened testtb-testExportWithResetTtl,1,1732255360660.7651663bc25f23dec0bfbfb869dee282. 
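For context on the two regions opened above (one spanning STARTKEY '' to ENDKEY '1', the other covering '1' to the end, each with the single column family 'cf'), the table was evidently created pre-split at the key '1'. Below is a minimal, illustrative sketch of creating such a table through the public HBase Admin API. The class name CreatePreSplitTable is invented for illustration, and the test harness's actual setup code is not visible in this log, so treat the call sequence as an assumption rather than the exact code that produced these entries.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreatePreSplitTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("testtb-testExportWithResetTtl");
      // One split key '1' yields the two regions seen in the log:
      // ('' -> '1') and ('1' -> '').
      byte[][] splitKeys = new byte[][] { Bytes.toBytes("1") };
      admin.createTable(
          TableDescriptorBuilder.newBuilder(tn)
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf")) // single family 'cf', as in the store opened above
              .build(),
          splitKeys);
    }
  }
}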
2024-11-22T06:02:41,087 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=27 updating hbase:meta row=7651663bc25f23dec0bfbfb869dee282, regionState=OPEN, openSeqNum=2, regionLocation=407411ea95db,41799,1732255321718 2024-11-22T06:02:41,088 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithResetTtl/1737f7a70c93ee364e2e424e900dc473/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T06:02:41,088 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(1114): Opened 1737f7a70c93ee364e2e424e900dc473; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71641844, jitterRate=0.06754666566848755}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-22T06:02:41,089 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1737f7a70c93ee364e2e424e900dc473 2024-11-22T06:02:41,089 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(1006): Region open journal for 1737f7a70c93ee364e2e424e900dc473: Running coprocessor pre-open hook at 1732255361076Writing region info on filesystem at 1732255361077 (+1 ms)Initializing all the Stores at 1732255361077Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732255361078 (+1 ms)Cleaning up temporary data from old regions at 1732255361083 (+5 ms)Running coprocessor post-open hooks at 1732255361089 (+6 ms)Region opened successfully at 1732255361089 2024-11-22T06:02:41,090 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithResetTtl,,1732255360660.1737f7a70c93ee364e2e424e900dc473., pid=29, masterSystemTime=1732255361072 2024-11-22T06:02:41,090 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=28, ppid=27, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7651663bc25f23dec0bfbfb869dee282, server=407411ea95db,41799,1732255321718 because future has completed 2024-11-22T06:02:41,092 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithResetTtl,,1732255360660.1737f7a70c93ee364e2e424e900dc473. 2024-11-22T06:02:41,092 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] handler.AssignRegionHandler(153): Opened testtb-testExportWithResetTtl,,1732255360660.1737f7a70c93ee364e2e424e900dc473. 
2024-11-22T06:02:41,094 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=26 updating hbase:meta row=1737f7a70c93ee364e2e424e900dc473, regionState=OPEN, openSeqNum=2, regionLocation=407411ea95db,44619,1732255321573 2024-11-22T06:02:41,096 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=28, resume processing ppid=27 2024-11-22T06:02:41,096 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=28, ppid=27, state=SUCCESS, hasLock=false; OpenRegionProcedure 7651663bc25f23dec0bfbfb869dee282, server=407411ea95db,41799,1732255321718 in 183 msec 2024-11-22T06:02:41,097 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=29, ppid=26, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1737f7a70c93ee364e2e424e900dc473, server=407411ea95db,44619,1732255321573 because future has completed 2024-11-22T06:02:41,098 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=27, ppid=25, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=7651663bc25f23dec0bfbfb869dee282, ASSIGN in 348 msec 2024-11-22T06:02:41,100 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=29, resume processing ppid=26 2024-11-22T06:02:41,100 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=29, ppid=26, state=SUCCESS, hasLock=false; OpenRegionProcedure 1737f7a70c93ee364e2e424e900dc473, server=407411ea95db,44619,1732255321573 in 181 msec 2024-11-22T06:02:41,103 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=26, resume processing ppid=25 2024-11-22T06:02:41,103 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=26, ppid=25, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=1737f7a70c93ee364e2e424e900dc473, ASSIGN in 352 msec 2024-11-22T06:02:41,104 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=25, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-22T06:02:41,104 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732255361104"}]},"ts":"1732255361104"} 2024-11-22T06:02:41,107 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=ENABLED in hbase:meta 2024-11-22T06:02:41,109 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=25, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_POST_OPERATION 2024-11-22T06:02:41,109 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithResetTtl jenkins: RWXCA 2024-11-22T06:02:41,115 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41799 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-11-22T06:02:41,126 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl 2024-11-22T06:02:41,126 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering 
RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl Metrics about Tables on a single HBase RegionServer 2024-11-22T06:02:41,127 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName 2024-11-22T06:02:41,152 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44619-0x101626c19a90001, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:02:41,152 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35283-0x101626c19a90003, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:02:41,152 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41799-0x101626c19a90002, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:02:41,152 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:02:41,202 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-22T06:02:41,203 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-22T06:02:41,203 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-22T06:02:41,203 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-22T06:02:41,206 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=25, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithResetTtl in 542 msec 2024-11-22T06:02:41,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=25 2024-11-22T06:02:41,290 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithResetTtl completed 2024-11-22T06:02:41,290 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportWithResetTtl get assigned. 
Timeout = 60000ms 2024-11-22T06:02:41,290 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-22T06:02:41,294 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportWithResetTtl assigned to meta. Checking AM states. 2024-11-22T06:02:41,294 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-22T06:02:41,294 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportWithResetTtl assigned. 2024-11-22T06:02:41,294 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-22T06:02:41,299 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-11-22T06:02:41,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732255361299 (current time:1732255361299). 2024-11-22T06:02:41,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-22T06:02:41,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithResetTtl VERSION not specified, setting to 2 2024-11-22T06:02:41,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-22T06:02:41,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@60d9e96d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:02:41,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] client.ClusterIdFetcher(90): Going to request 407411ea95db,40429,-1 for getting cluster id 2024-11-22T06:02:41,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T06:02:41,301 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '6eeb2921-6f20-4de7-bcc0-81ebecd4007b' 2024-11-22T06:02:41,302 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T06:02:41,302 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "6eeb2921-6f20-4de7-bcc0-81ebecd4007b" 2024-11-22T06:02:41,302 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@275c2a7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:02:41,302 
DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [407411ea95db,40429,-1] 2024-11-22T06:02:41,302 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T06:02:41,302 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:02:41,303 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54152, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T06:02:41,304 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@53031894, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:02:41,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T06:02:41,305 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=407411ea95db,41799,1732255321718, seqNum=-1] 2024-11-22T06:02:41,305 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T06:02:41,306 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60570, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T06:02:41,308 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429. 
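The master log above records a client snapshot request of the form { ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }. The following is a minimal sketch of issuing that kind of FLUSH snapshot from a client, under the assumption that the standard Admin API is used; the test's actual call site is not shown in this log, and the class name TakeFlushSnapshot is invented for illustration.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeFlushSnapshot {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // A FLUSH-type snapshot, matching the "type=FLUSH" in the request above.
      admin.snapshot("emptySnaptb0-testExportWithResetTtl",
          TableName.valueOf("testtb-testExportWithResetTtl"),
          SnapshotType.FLUSH);
    }
  }
}

The call blocks until the SnapshotProcedure on the master finishes, which is why the client in the log keeps polling "Checking to see if procedure is done".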
2024-11-22T06:02:41,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-22T06:02:41,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:02:41,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:02:41,308 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-22T06:02:41,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5ee50f5a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:02:41,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] client.ClusterIdFetcher(90): Going to request 407411ea95db,40429,-1 for getting cluster id 2024-11-22T06:02:41,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T06:02:41,311 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '6eeb2921-6f20-4de7-bcc0-81ebecd4007b' 2024-11-22T06:02:41,311 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T06:02:41,311 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "6eeb2921-6f20-4de7-bcc0-81ebecd4007b" 2024-11-22T06:02:41,312 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3eeac293, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:02:41,312 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [407411ea95db,40429,-1] 2024-11-22T06:02:41,312 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T06:02:41,312 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:02:41,313 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54182, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T06:02:41,314 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@76486370, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:02:41,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T06:02:41,316 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=407411ea95db,41799,1732255321718, seqNum=-1] 2024-11-22T06:02:41,316 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T06:02:41,318 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60572, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T06:02:41,320 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1732255324607.c2a1af531af5dfa747044be7409c1d27., hostname=407411ea95db,41799,1732255321718, seqNum=2] 2024-11-22T06:02:41,322 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429. 
2024-11-22T06:02:41,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-22T06:02:41,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:02:41,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:02:41,323 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-22T06:02:41,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-11-22T06:02:41,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
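The ACL entry read back above, [jenkins: RWXCA], grants the table creator read, write, execute, create and admin rights on testtb-testExportWithResetTtl, and it is copied into the snapshot description before the snapshot proceeds. As a hedged sketch only, a client could list such table permissions via AccessControlClient, assuming the AccessController coprocessor is enabled as the earlier coprocessor-load lines indicate; the class name ShowTableAcls is invented for illustration.

import java.util.List;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.UserPermission;

public class ShowTableAcls {
  public static void main(String[] args) throws Throwable {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
      // Lists entries like "jenkins: RWXCA" for the table, as read back in the log above.
      List<UserPermission> perms =
          AccessControlClient.getUserPermissions(conn, "testtb-testExportWithResetTtl");
      perms.forEach(p -> System.out.println(p));
    }
  }
}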
2024-11-22T06:02:41,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] procedure2.ProcedureExecutor(1139): Stored pid=30, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-11-22T06:02:41,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 30 2024-11-22T06:02:41,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=30 2024-11-22T06:02:41,328 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-22T06:02:41,330 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-22T06:02:41,333 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-22T06:02:41,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741896_1072 (size=161) 2024-11-22T06:02:41,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741896_1072 (size=161) 2024-11-22T06:02:41,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741896_1072 (size=161) 2024-11-22T06:02:41,348 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-22T06:02:41,349 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=31, ppid=30, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1737f7a70c93ee364e2e424e900dc473}, {pid=32, ppid=30, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7651663bc25f23dec0bfbfb869dee282}] 2024-11-22T06:02:41,350 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=31, ppid=30, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1737f7a70c93ee364e2e424e900dc473 2024-11-22T06:02:41,351 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=32, ppid=30, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7651663bc25f23dec0bfbfb869dee282 2024-11-22T06:02:41,439 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=30 2024-11-22T06:02:41,505 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44619 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=31 2024-11-22T06:02:41,505 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41799 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=32 2024-11-22T06:02:41,505 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=31}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,,1732255360660.1737f7a70c93ee364e2e424e900dc473. 2024-11-22T06:02:41,506 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=32}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,1,1732255360660.7651663bc25f23dec0bfbfb869dee282. 2024-11-22T06:02:41,506 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=32}] regionserver.HRegion(2603): Flush status journal for 7651663bc25f23dec0bfbfb869dee282: 2024-11-22T06:02:41,506 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=31}] regionserver.HRegion(2603): Flush status journal for 1737f7a70c93ee364e2e424e900dc473: 2024-11-22T06:02:41,506 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=31}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,,1732255360660.1737f7a70c93ee364e2e424e900dc473. for emptySnaptb0-testExportWithResetTtl completed. 2024-11-22T06:02:41,506 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=32}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,1,1732255360660.7651663bc25f23dec0bfbfb869dee282. for emptySnaptb0-testExportWithResetTtl completed. 2024-11-22T06:02:41,506 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=31}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,,1732255360660.1737f7a70c93ee364e2e424e900dc473.' region-info for snapshot=emptySnaptb0-testExportWithResetTtl 2024-11-22T06:02:41,506 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=32}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,1,1732255360660.7651663bc25f23dec0bfbfb869dee282.' 
region-info for snapshot=emptySnaptb0-testExportWithResetTtl 2024-11-22T06:02:41,506 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=31}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-22T06:02:41,506 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=31}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-22T06:02:41,506 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=32}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-22T06:02:41,506 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=32}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-22T06:02:41,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741897_1073 (size=68) 2024-11-22T06:02:41,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741898_1074 (size=68) 2024-11-22T06:02:41,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741897_1073 (size=68) 2024-11-22T06:02:41,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741897_1073 (size=68) 2024-11-22T06:02:41,518 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=32}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,1,1732255360660.7651663bc25f23dec0bfbfb869dee282. 2024-11-22T06:02:41,518 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=32}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=32 2024-11-22T06:02:41,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741898_1074 (size=68) 2024-11-22T06:02:41,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741898_1074 (size=68) 2024-11-22T06:02:41,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.HMaster(4169): Remote procedure done, pid=32 2024-11-22T06:02:41,519 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithResetTtl on region 7651663bc25f23dec0bfbfb869dee282 2024-11-22T06:02:41,519 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=31}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,,1732255360660.1737f7a70c93ee364e2e424e900dc473. 
2024-11-22T06:02:41,519 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=31 2024-11-22T06:02:41,519 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=32, ppid=30, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7651663bc25f23dec0bfbfb869dee282 2024-11-22T06:02:41,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.HMaster(4169): Remote procedure done, pid=31 2024-11-22T06:02:41,520 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithResetTtl on region 1737f7a70c93ee364e2e424e900dc473 2024-11-22T06:02:41,520 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=31, ppid=30, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1737f7a70c93ee364e2e424e900dc473 2024-11-22T06:02:41,522 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=32, ppid=30, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 7651663bc25f23dec0bfbfb869dee282 in 172 msec 2024-11-22T06:02:41,523 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=31, resume processing ppid=30 2024-11-22T06:02:41,523 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=31, ppid=30, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 1737f7a70c93ee364e2e424e900dc473 in 172 msec 2024-11-22T06:02:41,523 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-22T06:02:41,525 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-22T06:02:41,525 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-22T06:02:41,525 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithResetTtl 2024-11-22T06:02:41,526 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithResetTtl 2024-11-22T06:02:41,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741899_1075 (size=543) 2024-11-22T06:02:41,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741899_1075 (size=543) 2024-11-22T06:02:41,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741899_1075 
(size=543) 2024-11-22T06:02:41,539 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-22T06:02:41,545 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-22T06:02:41,546 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithResetTtl to hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/emptySnaptb0-testExportWithResetTtl 2024-11-22T06:02:41,548 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-22T06:02:41,548 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 30 2024-11-22T06:02:41,550 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=30, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } in 223 msec 2024-11-22T06:02:41,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=30 2024-11-22T06:02:41,649 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithResetTtl completed 2024-11-22T06:02:41,656 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithResetTtl', row='0f300f42479b5dee877b7b7f33babee86', locateType=CURRENT is [region=testtb-testExportWithResetTtl,,1732255360660.1737f7a70c93ee364e2e424e900dc473., hostname=407411ea95db,44619,1732255321573, seqNum=2] 2024-11-22T06:02:41,658 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithResetTtl', row='188818b771c3584d924cffe5fde823079', locateType=CURRENT is [region=testtb-testExportWithResetTtl,1,1732255360660.7651663bc25f23dec0bfbfb869dee282., hostname=407411ea95db,41799,1732255321718, seqNum=2] 2024-11-22T06:02:41,659 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithResetTtl', row='37efd8abc28ec7eb3348e378c496ac143', locateType=CURRENT is [region=testtb-testExportWithResetTtl,1,1732255360660.7651663bc25f23dec0bfbfb869dee282., hostname=407411ea95db,41799,1732255321718, seqNum=2] 2024-11-22T06:02:41,660 DEBUG 
[RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithResetTtl', row='27be07f5fad388c04754b75b9223418b9', locateType=CURRENT is [region=testtb-testExportWithResetTtl,1,1732255360660.7651663bc25f23dec0bfbfb869dee282., hostname=407411ea95db,41799,1732255321718, seqNum=2] 2024-11-22T06:02:41,661 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithResetTtl', row='441d8a8d9d32e34b4df7c2374a1695ceb', locateType=CURRENT is [region=testtb-testExportWithResetTtl,1,1732255360660.7651663bc25f23dec0bfbfb869dee282., hostname=407411ea95db,41799,1732255321718, seqNum=2] 2024-11-22T06:02:41,666 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44619 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithResetTtl,,1732255360660.1737f7a70c93ee364e2e424e900dc473. with WAL disabled. Data may be lost in the event of a crash. 2024-11-22T06:02:41,671 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41799 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithResetTtl,1,1732255360660.7651663bc25f23dec0bfbfb869dee282. with WAL disabled. Data may be lost in the event of a crash. 2024-11-22T06:02:41,675 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-22T06:02:41,679 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithResetTtl 2024-11-22T06:02:41,679 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithResetTtl,,1732255360660.1737f7a70c93ee364e2e424e900dc473. 2024-11-22T06:02:41,679 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-22T06:02:41,682 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-22T06:02:41,690 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-22T06:02:41,699 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-22T06:02:41,706 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-11-22T06:02:41,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732255361706 (current time:1732255361706). 
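The two warnings above about writing data with WAL disabled come from puts that skip the write-ahead log, trading durability for speed while loading test data. A minimal sketch of how a client put can request that behavior follows, assuming the standard Durability API; the row key "row-0", qualifier "q" and value "v" are placeholders rather than values taken from this log, and the class name PutWithoutWal is invented for illustration.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutWithoutWal {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("testtb-testExportWithResetTtl"))) {
      Put put = new Put(Bytes.toBytes("row-0"));                               // placeholder row key
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v")); // placeholder cell
      put.setDurability(Durability.SKIP_WAL);                                  // produces the "WAL disabled" warning above
      table.put(put);
    }
  }
}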
2024-11-22T06:02:41,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-22T06:02:41,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithResetTtl VERSION not specified, setting to 2 2024-11-22T06:02:41,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-22T06:02:41,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d794519, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:02:41,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] client.ClusterIdFetcher(90): Going to request 407411ea95db,40429,-1 for getting cluster id 2024-11-22T06:02:41,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T06:02:41,708 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '6eeb2921-6f20-4de7-bcc0-81ebecd4007b' 2024-11-22T06:02:41,709 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T06:02:41,709 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "6eeb2921-6f20-4de7-bcc0-81ebecd4007b" 2024-11-22T06:02:41,710 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@68fe99e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:02:41,710 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [407411ea95db,40429,-1] 2024-11-22T06:02:41,710 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T06:02:41,710 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:02:41,711 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54198, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T06:02:41,712 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@34c7eb6c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:02:41,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T06:02:41,714 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] 
client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=407411ea95db,41799,1732255321718, seqNum=-1] 2024-11-22T06:02:41,714 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T06:02:41,715 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60586, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T06:02:41,717 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429. 2024-11-22T06:02:41,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-22T06:02:41,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:02:41,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:02:41,717 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-22T06:02:41,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@24fa94fd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:02:41,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] client.ClusterIdFetcher(90): Going to request 407411ea95db,40429,-1 for getting cluster id 2024-11-22T06:02:41,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T06:02:41,725 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '6eeb2921-6f20-4de7-bcc0-81ebecd4007b' 2024-11-22T06:02:41,725 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T06:02:41,725 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "6eeb2921-6f20-4de7-bcc0-81ebecd4007b" 2024-11-22T06:02:41,725 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1a1fa5f4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:02:41,725 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [407411ea95db,40429,-1] 2024-11-22T06:02:41,726 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T06:02:41,726 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:02:41,727 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54216, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T06:02:41,728 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3ce142fb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:02:41,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T06:02:41,730 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=407411ea95db,41799,1732255321718, seqNum=-1] 2024-11-22T06:02:41,730 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T06:02:41,731 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60594, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-22T06:02:41,737 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1732255324607.c2a1af531af5dfa747044be7409c1d27., hostname=407411ea95db,41799,1732255321718, seqNum=2] 2024-11-22T06:02:41,740 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429. 2024-11-22T06:02:41,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-22T06:02:41,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:02:41,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:02:41,741 INFO [Registry-endpoints-refresh-end-points {}] 
client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-22T06:02:41,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-11-22T06:02:41,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-22T06:02:41,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] procedure2.ProcedureExecutor(1139): Stored pid=33, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-11-22T06:02:41,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 33 2024-11-22T06:02:41,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=33 2024-11-22T06:02:41,750 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=33, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-22T06:02:41,757 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=33, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-22T06:02:41,771 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=33, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-22T06:02:41,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741900_1076 (size=156) 2024-11-22T06:02:41,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741900_1076 (size=156) 2024-11-22T06:02:41,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741900_1076 (size=156) 2024-11-22T06:02:41,787 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=33, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-22T06:02:41,787 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=34, ppid=33, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1737f7a70c93ee364e2e424e900dc473}, {pid=35, ppid=33, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7651663bc25f23dec0bfbfb869dee282}] 2024-11-22T06:02:41,789 INFO [PEWorker-1 {}] 
procedure.MasterProcedureScheduler(851): Took xlock for pid=35, ppid=33, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7651663bc25f23dec0bfbfb869dee282 2024-11-22T06:02:41,789 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=34, ppid=33, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1737f7a70c93ee364e2e424e900dc473 2024-11-22T06:02:41,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=33 2024-11-22T06:02:41,942 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44619 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=34 2024-11-22T06:02:41,942 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41799 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=35 2024-11-22T06:02:41,943 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,,1732255360660.1737f7a70c93ee364e2e424e900dc473. 2024-11-22T06:02:41,943 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.HRegion(2902): Flushing 1737f7a70c93ee364e2e424e900dc473 1/1 column families, dataSize=199 B heapSize=688 B 2024-11-22T06:02:41,943 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,1,1732255360660.7651663bc25f23dec0bfbfb869dee282. 
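The SnapshotProcedure (pid=33) and the per-region SnapshotRegionProcedure flushes above are what the master runs when a client requests a FLUSH-type snapshot. A minimal client-side sketch of such a request, assuming an already open Connection named conn (this is not the test's own code):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public final class FlushSnapshotExample {
  static void takeFlushSnapshot(Connection conn) throws Exception {
    try (Admin admin = conn.getAdmin()) {
      // A FLUSH snapshot first flushes each region's memstore (the
      // "Flushing ... column families" lines above), then references the
      // resulting hfiles from the snapshot manifest.
      admin.snapshot(new SnapshotDescription(
          "snaptb0-testExportWithResetTtl",
          TableName.valueOf("testtb-testExportWithResetTtl"),
          SnapshotType.FLUSH));
    }
  }
}

The synchronous Admin.snapshot call is also what produces the repeated "Checking to see if procedure is done pid=33" polling seen in this log.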
2024-11-22T06:02:41,943 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.HRegion(2902): Flushing 7651663bc25f23dec0bfbfb869dee282 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-11-22T06:02:41,967 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithResetTtl/1737f7a70c93ee364e2e424e900dc473/.tmp/cf/a2da575b0120422286981e2f55babece is 71, key is 0b7e6d9e7bb0bc99fc8f0c474c06623d/cf:q/1732255361666/Put/seqid=0 2024-11-22T06:02:41,968 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithResetTtl/7651663bc25f23dec0bfbfb869dee282/.tmp/cf/6ca25720b1d44f0aa4b13aeec6a49827 is 71, key is 1ee9c4c66fdf03d4736ea6ce1dbedaee/cf:q/1732255361671/Put/seqid=0 2024-11-22T06:02:41,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741902_1078 (size=8326) 2024-11-22T06:02:41,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741902_1078 (size=8326) 2024-11-22T06:02:41,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741902_1078 (size=8326) 2024-11-22T06:02:41,995 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.06 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithResetTtl/7651663bc25f23dec0bfbfb869dee282/.tmp/cf/6ca25720b1d44f0aa4b13aeec6a49827 2024-11-22T06:02:41,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741901_1077 (size=5288) 2024-11-22T06:02:42,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741901_1077 (size=5288) 2024-11-22T06:02:42,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741901_1077 (size=5288) 2024-11-22T06:02:42,017 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=199 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithResetTtl/1737f7a70c93ee364e2e424e900dc473/.tmp/cf/a2da575b0120422286981e2f55babece 2024-11-22T06:02:42,037 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithResetTtl/1737f7a70c93ee364e2e424e900dc473/.tmp/cf/a2da575b0120422286981e2f55babece as 
hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithResetTtl/1737f7a70c93ee364e2e424e900dc473/cf/a2da575b0120422286981e2f55babece 2024-11-22T06:02:42,043 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithResetTtl/7651663bc25f23dec0bfbfb869dee282/.tmp/cf/6ca25720b1d44f0aa4b13aeec6a49827 as hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithResetTtl/7651663bc25f23dec0bfbfb869dee282/cf/6ca25720b1d44f0aa4b13aeec6a49827 2024-11-22T06:02:42,051 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithResetTtl/1737f7a70c93ee364e2e424e900dc473/cf/a2da575b0120422286981e2f55babece, entries=3, sequenceid=6, filesize=5.2 K 2024-11-22T06:02:42,053 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.HRegion(3140): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for 1737f7a70c93ee364e2e424e900dc473 in 110ms, sequenceid=6, compaction requested=false 2024-11-22T06:02:42,053 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithResetTtl' 2024-11-22T06:02:42,054 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.HRegion(2603): Flush status journal for 1737f7a70c93ee364e2e424e900dc473: 2024-11-22T06:02:42,054 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,,1732255360660.1737f7a70c93ee364e2e424e900dc473. for snaptb0-testExportWithResetTtl completed. 2024-11-22T06:02:42,054 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,,1732255360660.1737f7a70c93ee364e2e424e900dc473.' 
region-info for snapshot=snaptb0-testExportWithResetTtl 2024-11-22T06:02:42,055 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-22T06:02:42,055 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithResetTtl/1737f7a70c93ee364e2e424e900dc473/cf/a2da575b0120422286981e2f55babece] hfiles 2024-11-22T06:02:42,055 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithResetTtl/1737f7a70c93ee364e2e424e900dc473/cf/a2da575b0120422286981e2f55babece for snapshot=snaptb0-testExportWithResetTtl 2024-11-22T06:02:42,067 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithResetTtl/7651663bc25f23dec0bfbfb869dee282/cf/6ca25720b1d44f0aa4b13aeec6a49827, entries=47, sequenceid=6, filesize=8.1 K 2024-11-22T06:02:42,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=33 2024-11-22T06:02:42,072 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.HRegion(3140): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for 7651663bc25f23dec0bfbfb869dee282 in 128ms, sequenceid=6, compaction requested=false 2024-11-22T06:02:42,072 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.HRegion(2603): Flush status journal for 7651663bc25f23dec0bfbfb869dee282: 2024-11-22T06:02:42,072 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,1,1732255360660.7651663bc25f23dec0bfbfb869dee282. for snaptb0-testExportWithResetTtl completed. 2024-11-22T06:02:42,072 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,1,1732255360660.7651663bc25f23dec0bfbfb869dee282.' 
region-info for snapshot=snaptb0-testExportWithResetTtl 2024-11-22T06:02:42,072 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-22T06:02:42,072 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithResetTtl/7651663bc25f23dec0bfbfb869dee282/cf/6ca25720b1d44f0aa4b13aeec6a49827] hfiles 2024-11-22T06:02:42,072 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithResetTtl/7651663bc25f23dec0bfbfb869dee282/cf/6ca25720b1d44f0aa4b13aeec6a49827 for snapshot=snaptb0-testExportWithResetTtl 2024-11-22T06:02:42,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741903_1079 (size=107) 2024-11-22T06:02:42,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741903_1079 (size=107) 2024-11-22T06:02:42,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741903_1079 (size=107) 2024-11-22T06:02:42,113 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,,1732255360660.1737f7a70c93ee364e2e424e900dc473. 2024-11-22T06:02:42,113 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=34 2024-11-22T06:02:42,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.HMaster(4169): Remote procedure done, pid=34 2024-11-22T06:02:42,114 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithResetTtl on region 1737f7a70c93ee364e2e424e900dc473 2024-11-22T06:02:42,114 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=34, ppid=33, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1737f7a70c93ee364e2e424e900dc473 2024-11-22T06:02:42,117 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=34, ppid=33, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 1737f7a70c93ee364e2e424e900dc473 in 329 msec 2024-11-22T06:02:42,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741904_1080 (size=107) 2024-11-22T06:02:42,132 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,1,1732255360660.7651663bc25f23dec0bfbfb869dee282. 
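Once both region sub-procedures report back, the snapshot still has to be consolidated and moved out of the .tmp working directory (a later step in this log). A small sketch of how completion can be verified from the client side, assuming an open Admin handle and a hypothetical SnapshotCheck class:

import java.util.List;
import java.util.regex.Pattern;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public final class SnapshotCheck {
  // listSnapshots only returns snapshots already moved from
  // .hbase-snapshot/.tmp into .hbase-snapshot/, i.e. fully completed ones.
  static boolean isDone(Admin admin) throws Exception {
    List<SnapshotDescription> done =
        admin.listSnapshots(Pattern.compile("snaptb0-testExportWithResetTtl"));
    return !done.isEmpty();
  }
}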
2024-11-22T06:02:42,133 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=35 2024-11-22T06:02:42,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.HMaster(4169): Remote procedure done, pid=35 2024-11-22T06:02:42,134 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithResetTtl on region 7651663bc25f23dec0bfbfb869dee282 2024-11-22T06:02:42,134 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=35, ppid=33, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7651663bc25f23dec0bfbfb869dee282 2024-11-22T06:02:42,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741904_1080 (size=107) 2024-11-22T06:02:42,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741904_1080 (size=107) 2024-11-22T06:02:42,138 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=35, resume processing ppid=33 2024-11-22T06:02:42,138 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=35, ppid=33, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 7651663bc25f23dec0bfbfb869dee282 in 349 msec 2024-11-22T06:02:42,138 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=33, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-22T06:02:42,140 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=33, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-22T06:02:42,141 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=33, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-22T06:02:42,141 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithResetTtl 2024-11-22T06:02:42,142 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/.tmp/snaptb0-testExportWithResetTtl 2024-11-22T06:02:42,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741905_1081 (size=621) 2024-11-22T06:02:42,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741905_1081 (size=621) 2024-11-22T06:02:42,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741905_1081 (size=621) 2024-11-22T06:02:42,160 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): 
pid=33, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-22T06:02:42,168 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=33, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-22T06:02:42,169 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/.tmp/snaptb0-testExportWithResetTtl to hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/snaptb0-testExportWithResetTtl 2024-11-22T06:02:42,171 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=33, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-22T06:02:42,171 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 33 2024-11-22T06:02:42,179 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=33, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } in 429 msec 2024-11-22T06:02:42,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=33 2024-11-22T06:02:42,380 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithResetTtl completed 2024-11-22T06:02:42,381 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T06:02:42,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] procedure2.ProcedureExecutor(1139): Stored pid=36, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testExportWithResetTtl 2024-11-22T06:02:42,387 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=36, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_PRE_OPERATION 2024-11-22T06:02:42,387 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T06:02:42,387 INFO 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testExportWithResetTtl" procId is: 36 2024-11-22T06:02:42,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=36 2024-11-22T06:02:42,389 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=36, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-22T06:02:42,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741906_1082 (size=397) 2024-11-22T06:02:42,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741906_1082 (size=397) 2024-11-22T06:02:42,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741906_1082 (size=397) 2024-11-22T06:02:42,407 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 580aaa30dff978e83d0e605a1fe4c9be, NAME => 'testExportWithResetTtl,,1732255362381.580aaa30dff978e83d0e605a1fe4c9be.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788 2024-11-22T06:02:42,408 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 915eae74ee43cf48f7325fae2d97b580, NAME => 'testExportWithResetTtl,1,1732255362381.915eae74ee43cf48f7325fae2d97b580.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788 2024-11-22T06:02:42,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741908_1084 (size=58) 2024-11-22T06:02:42,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741907_1083 (size=58) 2024-11-22T06:02:42,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741907_1083 (size=58) 2024-11-22T06:02:42,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741907_1083 (size=58) 
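The CreateTableProcedure above builds 'testExportWithResetTtl' with a single 'cf' family and two regions split at row '1'. A minimal sketch of an equivalent client request, assuming an open Connection named conn (the test harness uses its own utility methods rather than this exact code):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public final class CreateExportTable {
  static void createTable(Connection conn) throws Exception {
    TableDescriptor desc = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("testExportWithResetTtl"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("cf"))
            .setMaxVersions(1)   // matches VERSIONS => '1' in the descriptor above
            .build())
        .build();
    try (Admin admin = conn.getAdmin()) {
      // A single split key "1" yields the two regions logged above:
      // ['', '1') and ['1', '').
      admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
    }
  }
}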
2024-11-22T06:02:42,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741908_1084 (size=58) 2024-11-22T06:02:42,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741908_1084 (size=58) 2024-11-22T06:02:42,426 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,,1732255362381.580aaa30dff978e83d0e605a1fe4c9be.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T06:02:42,426 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1722): Closing 580aaa30dff978e83d0e605a1fe4c9be, disabling compactions & flushes 2024-11-22T06:02:42,426 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,,1732255362381.580aaa30dff978e83d0e605a1fe4c9be. 2024-11-22T06:02:42,426 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,,1732255362381.580aaa30dff978e83d0e605a1fe4c9be. 2024-11-22T06:02:42,426 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,,1732255362381.580aaa30dff978e83d0e605a1fe4c9be. after waiting 0 ms 2024-11-22T06:02:42,427 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,,1732255362381.580aaa30dff978e83d0e605a1fe4c9be. 2024-11-22T06:02:42,427 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1973): Closed testExportWithResetTtl,,1732255362381.580aaa30dff978e83d0e605a1fe4c9be. 2024-11-22T06:02:42,427 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1676): Region close journal for 580aaa30dff978e83d0e605a1fe4c9be: Waiting for close lock at 1732255362426Disabling compacts and flushes for region at 1732255362426Disabling writes for close at 1732255362427 (+1 ms)Writing region close event to WAL at 1732255362427Closed at 1732255362427 2024-11-22T06:02:42,427 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,1,1732255362381.915eae74ee43cf48f7325fae2d97b580.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T06:02:42,427 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1722): Closing 915eae74ee43cf48f7325fae2d97b580, disabling compactions & flushes 2024-11-22T06:02:42,427 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,1,1732255362381.915eae74ee43cf48f7325fae2d97b580. 2024-11-22T06:02:42,427 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,1,1732255362381.915eae74ee43cf48f7325fae2d97b580. 2024-11-22T06:02:42,427 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,1,1732255362381.915eae74ee43cf48f7325fae2d97b580. 
after waiting 0 ms 2024-11-22T06:02:42,427 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,1,1732255362381.915eae74ee43cf48f7325fae2d97b580. 2024-11-22T06:02:42,427 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1973): Closed testExportWithResetTtl,1,1732255362381.915eae74ee43cf48f7325fae2d97b580. 2024-11-22T06:02:42,427 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1676): Region close journal for 915eae74ee43cf48f7325fae2d97b580: Waiting for close lock at 1732255362427Disabling compacts and flushes for region at 1732255362427Disabling writes for close at 1732255362427Writing region close event to WAL at 1732255362427Closed at 1732255362427 2024-11-22T06:02:42,429 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=36, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_ADD_TO_META 2024-11-22T06:02:42,429 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportWithResetTtl,,1732255362381.580aaa30dff978e83d0e605a1fe4c9be.","families":{"info":[{"qualifier":"regioninfo","vlen":57,"tag":[],"timestamp":"1732255362429"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732255362429"}]},"ts":"1732255362429"} 2024-11-22T06:02:42,429 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportWithResetTtl,1,1732255362381.915eae74ee43cf48f7325fae2d97b580.","families":{"info":[{"qualifier":"regioninfo","vlen":57,"tag":[],"timestamp":"1732255362429"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732255362429"}]},"ts":"1732255362429"} 2024-11-22T06:02:42,432 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
2024-11-22T06:02:42,433 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=36, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-22T06:02:42,433 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732255362433"}]},"ts":"1732255362433"} 2024-11-22T06:02:42,436 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=ENABLING in hbase:meta 2024-11-22T06:02:42,436 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {407411ea95db=0} racks are {/default-rack=0} 2024-11-22T06:02:42,437 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-22T06:02:42,437 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-22T06:02:42,437 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-22T06:02:42,438 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-22T06:02:42,438 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-22T06:02:42,438 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-22T06:02:42,438 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-22T06:02:42,438 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-22T06:02:42,438 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-22T06:02:42,438 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-22T06:02:42,438 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=37, ppid=36, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=580aaa30dff978e83d0e605a1fe4c9be, ASSIGN}, {pid=38, ppid=36, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=915eae74ee43cf48f7325fae2d97b580, ASSIGN}] 2024-11-22T06:02:42,439 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=38, ppid=36, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=915eae74ee43cf48f7325fae2d97b580, ASSIGN 2024-11-22T06:02:42,440 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=37, ppid=36, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=580aaa30dff978e83d0e605a1fe4c9be, ASSIGN 2024-11-22T06:02:42,441 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=38, ppid=36, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=915eae74ee43cf48f7325fae2d97b580, ASSIGN; state=OFFLINE, location=407411ea95db,41799,1732255321718; forceNewPlan=false, retain=false 2024-11-22T06:02:42,441 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=37, ppid=36, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; 
TransitRegionStateProcedure table=testExportWithResetTtl, region=580aaa30dff978e83d0e605a1fe4c9be, ASSIGN; state=OFFLINE, location=407411ea95db,35283,1732255321784; forceNewPlan=false, retain=false 2024-11-22T06:02:42,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=36 2024-11-22T06:02:42,591 INFO [407411ea95db:40429 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-11-22T06:02:42,591 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=38 updating hbase:meta row=915eae74ee43cf48f7325fae2d97b580, regionState=OPENING, regionLocation=407411ea95db,41799,1732255321718 2024-11-22T06:02:42,591 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=37 updating hbase:meta row=580aaa30dff978e83d0e605a1fe4c9be, regionState=OPENING, regionLocation=407411ea95db,35283,1732255321784 2024-11-22T06:02:42,594 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=37, ppid=36, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=580aaa30dff978e83d0e605a1fe4c9be, ASSIGN because future has completed 2024-11-22T06:02:42,594 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=39, ppid=37, state=RUNNABLE, hasLock=false; OpenRegionProcedure 580aaa30dff978e83d0e605a1fe4c9be, server=407411ea95db,35283,1732255321784}] 2024-11-22T06:02:42,595 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=38, ppid=36, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=915eae74ee43cf48f7325fae2d97b580, ASSIGN because future has completed 2024-11-22T06:02:42,596 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=40, ppid=38, state=RUNNABLE, hasLock=false; OpenRegionProcedure 915eae74ee43cf48f7325fae2d97b580, server=407411ea95db,41799,1732255321718}] 2024-11-22T06:02:42,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=36 2024-11-22T06:02:42,751 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] handler.AssignRegionHandler(132): Open testExportWithResetTtl,,1732255362381.580aaa30dff978e83d0e605a1fe4c9be. 2024-11-22T06:02:42,751 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(7752): Opening region: {ENCODED => 580aaa30dff978e83d0e605a1fe4c9be, NAME => 'testExportWithResetTtl,,1732255362381.580aaa30dff978e83d0e605a1fe4c9be.', STARTKEY => '', ENDKEY => '1'} 2024-11-22T06:02:42,751 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] handler.AssignRegionHandler(132): Open testExportWithResetTtl,1,1732255362381.915eae74ee43cf48f7325fae2d97b580. 
2024-11-22T06:02:42,752 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(7752): Opening region: {ENCODED => 915eae74ee43cf48f7325fae2d97b580, NAME => 'testExportWithResetTtl,1,1732255362381.915eae74ee43cf48f7325fae2d97b580.', STARTKEY => '1', ENDKEY => ''} 2024-11-22T06:02:42,752 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportWithResetTtl,1,1732255362381.915eae74ee43cf48f7325fae2d97b580. service=AccessControlService 2024-11-22T06:02:42,752 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-22T06:02:42,752 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportWithResetTtl 915eae74ee43cf48f7325fae2d97b580 2024-11-22T06:02:42,752 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,1,1732255362381.915eae74ee43cf48f7325fae2d97b580.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T06:02:42,752 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(7794): checking encryption for 915eae74ee43cf48f7325fae2d97b580 2024-11-22T06:02:42,753 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(7797): checking classloading for 915eae74ee43cf48f7325fae2d97b580 2024-11-22T06:02:42,751 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportWithResetTtl,,1732255362381.580aaa30dff978e83d0e605a1fe4c9be. service=AccessControlService 2024-11-22T06:02:42,753 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
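The "Registered coprocessor service ... AccessControlService" and "System coprocessor ... AccessController loaded" lines appear because the test cluster is started with authorization enabled. A hedged sketch of the kind of configuration that produces this, kept in Java for consistency (the key names are standard HBase properties, but this is an illustration, not the test's exact setup):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class AccessControlConf {
  // Enable authorization and load the AccessController on master,
  // regionservers, and every region, which is what makes each opened region
  // register AccessControlService as seen above.
  static Configuration withAccessController() {
    Configuration conf = HBaseConfiguration.create();
    conf.setBoolean("hbase.security.authorization", true);
    conf.set("hbase.coprocessor.master.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");
    conf.set("hbase.coprocessor.region.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");
    conf.set("hbase.coprocessor.regionserver.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");
    return conf;
  }
}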
2024-11-22T06:02:42,753 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportWithResetTtl 580aaa30dff978e83d0e605a1fe4c9be 2024-11-22T06:02:42,753 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,,1732255362381.580aaa30dff978e83d0e605a1fe4c9be.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T06:02:42,753 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(7794): checking encryption for 580aaa30dff978e83d0e605a1fe4c9be 2024-11-22T06:02:42,753 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(7797): checking classloading for 580aaa30dff978e83d0e605a1fe4c9be 2024-11-22T06:02:42,755 INFO [StoreOpener-915eae74ee43cf48f7325fae2d97b580-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 915eae74ee43cf48f7325fae2d97b580 2024-11-22T06:02:42,757 INFO [StoreOpener-915eae74ee43cf48f7325fae2d97b580-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 915eae74ee43cf48f7325fae2d97b580 columnFamilyName cf 2024-11-22T06:02:42,757 DEBUG [StoreOpener-915eae74ee43cf48f7325fae2d97b580-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T06:02:42,757 INFO [StoreOpener-915eae74ee43cf48f7325fae2d97b580-1 {}] regionserver.HStore(327): Store=915eae74ee43cf48f7325fae2d97b580/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T06:02:42,758 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(1038): replaying wal for 915eae74ee43cf48f7325fae2d97b580 2024-11-22T06:02:42,759 INFO [StoreOpener-580aaa30dff978e83d0e605a1fe4c9be-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 580aaa30dff978e83d0e605a1fe4c9be 2024-11-22T06:02:42,759 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testExportWithResetTtl/915eae74ee43cf48f7325fae2d97b580 2024-11-22T06:02:42,759 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testExportWithResetTtl/915eae74ee43cf48f7325fae2d97b580 2024-11-22T06:02:42,760 INFO [StoreOpener-580aaa30dff978e83d0e605a1fe4c9be-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 580aaa30dff978e83d0e605a1fe4c9be columnFamilyName cf 2024-11-22T06:02:42,761 DEBUG [StoreOpener-580aaa30dff978e83d0e605a1fe4c9be-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T06:02:42,761 INFO [StoreOpener-580aaa30dff978e83d0e605a1fe4c9be-1 {}] regionserver.HStore(327): Store=580aaa30dff978e83d0e605a1fe4c9be/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T06:02:42,761 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1038): replaying wal for 580aaa30dff978e83d0e605a1fe4c9be 2024-11-22T06:02:42,761 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(1048): stopping wal replay for 915eae74ee43cf48f7325fae2d97b580 2024-11-22T06:02:42,762 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(1060): Cleaning up temporary data for 915eae74ee43cf48f7325fae2d97b580 2024-11-22T06:02:42,762 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testExportWithResetTtl/580aaa30dff978e83d0e605a1fe4c9be 2024-11-22T06:02:42,763 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testExportWithResetTtl/580aaa30dff978e83d0e605a1fe4c9be 2024-11-22T06:02:42,763 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1048): stopping wal replay for 580aaa30dff978e83d0e605a1fe4c9be 2024-11-22T06:02:42,763 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1060): Cleaning up temporary data for 580aaa30dff978e83d0e605a1fe4c9be 2024-11-22T06:02:42,763 DEBUG 
[RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(1093): writing seq id for 915eae74ee43cf48f7325fae2d97b580 2024-11-22T06:02:42,765 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testExportWithResetTtl/915eae74ee43cf48f7325fae2d97b580/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T06:02:42,766 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(1114): Opened 915eae74ee43cf48f7325fae2d97b580; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61057519, jitterRate=-0.09017206728458405}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-22T06:02:42,766 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 915eae74ee43cf48f7325fae2d97b580 2024-11-22T06:02:42,767 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1093): writing seq id for 580aaa30dff978e83d0e605a1fe4c9be 2024-11-22T06:02:42,767 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(1006): Region open journal for 915eae74ee43cf48f7325fae2d97b580: Running coprocessor pre-open hook at 1732255362753Writing region info on filesystem at 1732255362753Initializing all the Stores at 1732255362754 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732255362754Cleaning up temporary data from old regions at 1732255362762 (+8 ms)Running coprocessor post-open hooks at 1732255362766 (+4 ms)Region opened successfully at 1732255362767 (+1 ms) 2024-11-22T06:02:42,769 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportWithResetTtl,1,1732255362381.915eae74ee43cf48f7325fae2d97b580., pid=40, masterSystemTime=1732255362748 2024-11-22T06:02:42,770 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testExportWithResetTtl/580aaa30dff978e83d0e605a1fe4c9be/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T06:02:42,771 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1114): Opened 580aaa30dff978e83d0e605a1fe4c9be; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65271365, jitterRate=-0.027380868792533875}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-22T06:02:42,772 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 580aaa30dff978e83d0e605a1fe4c9be 
2024-11-22T06:02:42,772 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1006): Region open journal for 580aaa30dff978e83d0e605a1fe4c9be: Running coprocessor pre-open hook at 1732255362754Writing region info on filesystem at 1732255362754Initializing all the Stores at 1732255362755 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732255362755Cleaning up temporary data from old regions at 1732255362763 (+8 ms)Running coprocessor post-open hooks at 1732255362772 (+9 ms)Region opened successfully at 1732255362772 2024-11-22T06:02:42,772 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportWithResetTtl,1,1732255362381.915eae74ee43cf48f7325fae2d97b580. 2024-11-22T06:02:42,773 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportWithResetTtl,,1732255362381.580aaa30dff978e83d0e605a1fe4c9be., pid=39, masterSystemTime=1732255362747 2024-11-22T06:02:42,773 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=38 updating hbase:meta row=915eae74ee43cf48f7325fae2d97b580, regionState=OPEN, openSeqNum=2, regionLocation=407411ea95db,41799,1732255321718 2024-11-22T06:02:42,774 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] handler.AssignRegionHandler(153): Opened testExportWithResetTtl,1,1732255362381.915eae74ee43cf48f7325fae2d97b580. 2024-11-22T06:02:42,776 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=40, ppid=38, state=RUNNABLE, hasLock=false; OpenRegionProcedure 915eae74ee43cf48f7325fae2d97b580, server=407411ea95db,41799,1732255321718 because future has completed 2024-11-22T06:02:42,776 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportWithResetTtl,,1732255362381.580aaa30dff978e83d0e605a1fe4c9be. 2024-11-22T06:02:42,776 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] handler.AssignRegionHandler(153): Opened testExportWithResetTtl,,1732255362381.580aaa30dff978e83d0e605a1fe4c9be. 
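With both regions now opened and their OPEN state about to be written back to hbase:meta, a client can observe the resulting assignment directly. A short sketch, assuming an open Connection named conn and a hypothetical RegionAssignmentDump class:

import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

public final class RegionAssignmentDump {
  // Print each region of the new table and the server hosting it, mirroring
  // the regionLocation values recorded in hbase:meta above.
  static void dump(Connection conn) throws Exception {
    try (RegionLocator locator =
             conn.getRegionLocator(TableName.valueOf("testExportWithResetTtl"))) {
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        System.out.println(loc.getRegion().getRegionNameAsString()
            + " -> " + loc.getServerName());
      }
    }
  }
}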
2024-11-22T06:02:42,777 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=37 updating hbase:meta row=580aaa30dff978e83d0e605a1fe4c9be, regionState=OPEN, openSeqNum=2, regionLocation=407411ea95db,35283,1732255321784 2024-11-22T06:02:42,780 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=39, ppid=37, state=RUNNABLE, hasLock=false; OpenRegionProcedure 580aaa30dff978e83d0e605a1fe4c9be, server=407411ea95db,35283,1732255321784 because future has completed 2024-11-22T06:02:42,781 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=40, resume processing ppid=38 2024-11-22T06:02:42,781 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=40, ppid=38, state=SUCCESS, hasLock=false; OpenRegionProcedure 915eae74ee43cf48f7325fae2d97b580, server=407411ea95db,41799,1732255321718 in 181 msec 2024-11-22T06:02:42,783 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=38, ppid=36, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=915eae74ee43cf48f7325fae2d97b580, ASSIGN in 343 msec 2024-11-22T06:02:42,785 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=39, resume processing ppid=37 2024-11-22T06:02:42,785 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=39, ppid=37, state=SUCCESS, hasLock=false; OpenRegionProcedure 580aaa30dff978e83d0e605a1fe4c9be, server=407411ea95db,35283,1732255321784 in 188 msec 2024-11-22T06:02:42,789 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=37, resume processing ppid=36 2024-11-22T06:02:42,789 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=37, ppid=36, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=580aaa30dff978e83d0e605a1fe4c9be, ASSIGN in 347 msec 2024-11-22T06:02:42,790 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=36, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-22T06:02:42,790 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732255362790"}]},"ts":"1732255362790"} 2024-11-22T06:02:42,793 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=ENABLED in hbase:meta 2024-11-22T06:02:42,795 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=36, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_POST_OPERATION 2024-11-22T06:02:42,795 DEBUG [PEWorker-3 {}] access.PermissionStorage(177): Writing permission with rowKey testExportWithResetTtl jenkins: RWXCA 2024-11-22T06:02:42,800 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41799 {}] access.PermissionStorage(613): Read acl: entry[testExportWithResetTtl], kv [jenkins: RWXCA] 2024-11-22T06:02:42,810 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41799-0x101626c19a90002, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:02:42,810 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35283-0x101626c19a90003, 
quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:02:42,810 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:02:42,810 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44619-0x101626c19a90001, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:02:42,814 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-22T06:02:42,823 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-22T06:02:42,823 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44619-0x101626c19a90001, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-22T06:02:42,823 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-22T06:02:42,824 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-22T06:02:42,824 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-22T06:02:42,824 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-22T06:02:42,824 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-22T06:02:42,825 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-22T06:02:42,828 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=36, state=SUCCESS, hasLock=false; CreateTableProcedure table=testExportWithResetTtl in 444 msec 2024-11-22T06:02:43,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=36 
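The CreateTableProcedure (pid=36) for testExportWithResetTtl has just finished above. As a reference point, here is a minimal, hypothetical client-side sketch (not the test's own code) of creating a table like the one described in the column-family descriptor logged earlier: one family 'cf' with a single version, ROW bloom filter, 64 KB blocks, pre-split at "1" so two regions come up as in the log. Class name and the connection setup are illustrative assumptions.

// Hypothetical sketch of creating the table seen in this log.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateExportTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes("cf"))
          .setMaxVersions(1)                  // VERSIONS => '1'
          .setBloomFilterType(BloomType.ROW)  // BLOOMFILTER => 'ROW'
          .setBlocksize(65536)                // BLOCKSIZE => '65536'
          .build();
      TableDescriptor td = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testExportWithResetTtl"))
          .setColumnFamily(cf)
          .build();
      // Split at "1" gives the two regions seen in the log: [,1) and [1,).
      byte[][] splitKeys = { Bytes.toBytes("1") };
      admin.createTable(td, splitKeys);
    }
  }
}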
2024-11-22T06:02:43,020 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testExportWithResetTtl completed 2024-11-22T06:02:43,020 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testExportWithResetTtl get assigned. Timeout = 60000ms 2024-11-22T06:02:43,020 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-22T06:02:43,025 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testExportWithResetTtl assigned to meta. Checking AM states. 2024-11-22T06:02:43,026 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-22T06:02:43,026 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testExportWithResetTtl assigned. 2024-11-22T06:02:43,026 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-22T06:02:43,035 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportWithResetTtl', row='0c525458720d8efa6b1d95718e2454637', locateType=CURRENT is [region=testExportWithResetTtl,,1732255362381.580aaa30dff978e83d0e605a1fe4c9be., hostname=407411ea95db,35283,1732255321784, seqNum=2] 2024-11-22T06:02:43,037 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportWithResetTtl', row='1dc64f75f256093e746e96f42136e4e3d', locateType=CURRENT is [region=testExportWithResetTtl,1,1732255362381.915eae74ee43cf48f7325fae2d97b580., hostname=407411ea95db,41799,1732255321718, seqNum=2] 2024-11-22T06:02:43,038 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportWithResetTtl', row='2fe4523983f968394c532d9bbd48533c8', locateType=CURRENT is [region=testExportWithResetTtl,1,1732255362381.915eae74ee43cf48f7325fae2d97b580., hostname=407411ea95db,41799,1732255321718, seqNum=2] 2024-11-22T06:02:43,040 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportWithResetTtl', row='48b7b80dcbcd95f6536d58cf083a342dc', locateType=CURRENT is [region=testExportWithResetTtl,1,1732255362381.915eae74ee43cf48f7325fae2d97b580., hostname=407411ea95db,41799,1732255321718, seqNum=2] 2024-11-22T06:02:43,041 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportWithResetTtl', row='368f72e55656e6b2970f5c16b70fb7c16', locateType=CURRENT is [region=testExportWithResetTtl,1,1732255362381.915eae74ee43cf48f7325fae2d97b580., hostname=407411ea95db,41799,1732255321718, seqNum=2] 2024-11-22T06:02:43,045 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35283 {}] regionserver.HRegion(8528): writing data to region testExportWithResetTtl,,1732255362381.580aaa30dff978e83d0e605a1fe4c9be. with WAL disabled. Data may be lost in the event of a crash. 2024-11-22T06:02:43,049 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41799 {}] regionserver.HRegion(8528): writing data to region testExportWithResetTtl,1,1732255362381.915eae74ee43cf48f7325fae2d97b580. with WAL disabled. Data may be lost in the event of a crash. 
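The two HRegion(8528) entries above show the test writing rows with the WAL disabled. A minimal sketch of how a client produces such writes, assuming an illustrative row and value (only the table name is taken from the log), is:

// Hypothetical sketch: a Put with the WAL skipped, matching the
// "writing data to region ... with WAL disabled" warnings above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("testExportWithResetTtl"))) {
      Put put = new Put(Bytes.toBytes("row1"));
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      // SKIP_WAL trades durability for speed; data may be lost on a crash,
      // which is exactly what the region server warns about in the log.
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);
    }
  }
}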
2024-11-22T06:02:43,051 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-22T06:02:43,055 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportWithResetTtl 2024-11-22T06:02:43,055 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportWithResetTtl,,1732255362381.580aaa30dff978e83d0e605a1fe4c9be. 2024-11-22T06:02:43,055 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-22T06:02:43,058 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-22T06:02:43,066 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-22T06:02:43,076 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-22T06:02:43,080 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } 2024-11-22T06:02:43,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732255363080 (current time:1732255363080). 
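The master has just received the snapshot request { ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }. A hypothetical client-side counterpart of that request, using only the snapshot and table names from the log, might look like the following sketch:

// Hypothetical sketch: requesting a FLUSH snapshot from the client side.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public class FlushSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // FLUSH type matches "type=FLUSH" in the request above. The ttl=100000
      // seen in the log is supplied via snapshot properties on versions that
      // support it; it is omitted here to keep the sketch simple.
      admin.snapshot(new SnapshotDescription(
          "snaptb-testExportWithResetTtl",
          TableName.valueOf("testExportWithResetTtl"),
          SnapshotType.FLUSH));
    }
  }
}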
2024-11-22T06:02:43,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb-testExportWithResetTtl VERSION not specified, setting to 2 2024-11-22T06:02:43,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-22T06:02:43,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3745935f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:02:43,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] client.ClusterIdFetcher(90): Going to request 407411ea95db,40429,-1 for getting cluster id 2024-11-22T06:02:43,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T06:02:43,082 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '6eeb2921-6f20-4de7-bcc0-81ebecd4007b' 2024-11-22T06:02:43,082 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T06:02:43,082 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "6eeb2921-6f20-4de7-bcc0-81ebecd4007b" 2024-11-22T06:02:43,083 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@8202c1f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:02:43,083 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [407411ea95db,40429,-1] 2024-11-22T06:02:43,083 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T06:02:43,083 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:02:43,084 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53278, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T06:02:43,085 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@619a5d76, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:02:43,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T06:02:43,086 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=407411ea95db,41799,1732255321718, seqNum=-1] 2024-11-22T06:02:43,087 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-11-22T06:02:43,088 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59220, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-11-22T06:02:43,090 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429.
2024-11-22T06:02:43,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] client.AsyncConnectionImpl(264): Call stack:
  at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
  at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
  at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
  at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
  at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
  at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134)
  at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481)
  at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353)
  at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767)
  at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java)
  at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
  at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
  at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
  at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-11-22T06:02:43,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-22T06:02:43,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-22T06:02:43,090 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited.
2024-11-22T06:02:43,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3afcbee3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:02:43,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] client.ClusterIdFetcher(90): Going to request 407411ea95db,40429,-1 for getting cluster id 2024-11-22T06:02:43,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T06:02:43,093 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '6eeb2921-6f20-4de7-bcc0-81ebecd4007b' 2024-11-22T06:02:43,093 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T06:02:43,093 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "6eeb2921-6f20-4de7-bcc0-81ebecd4007b" 2024-11-22T06:02:43,093 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@30abc924, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:02:43,093 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [407411ea95db,40429,-1] 2024-11-22T06:02:43,094 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T06:02:43,094 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:02:43,095 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53290, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T06:02:43,096 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@26704f56, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:02:43,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T06:02:43,097 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=407411ea95db,41799,1732255321718, seqNum=-1] 2024-11-22T06:02:43,098 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T06:02:43,099 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59226, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-22T06:02:43,101 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1732255324607.c2a1af531af5dfa747044be7409c1d27., hostname=407411ea95db,41799,1732255321718, seqNum=2]
2024-11-22T06:02:43,104 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429.
2024-11-22T06:02:43,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] client.AsyncConnectionImpl(264): Call stack:
  at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
  at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
  at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
  at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
  at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
  at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134)
  at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522)
  at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485)
  at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490)
  at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487)
  at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
  at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
  at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
  at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555)
  at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535)
  at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
  at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
  at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
  at java.base/java.lang.reflect.Method.invoke(Method.java:568)
  at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39)
  at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174)
  at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487)
  at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354)
  at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767)
  at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java)
  at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
  at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
  at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
  at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-11-22T06:02:43,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-22T06:02:43,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-22T06:02:43,104 INFO [Registry-endpoints-refresh-end-points {}]
client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-22T06:02:43,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] access.PermissionStorage(613): Read acl: entry[testExportWithResetTtl], kv [jenkins: RWXCA] 2024-11-22T06:02:43,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-22T06:02:43,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] procedure2.ProcedureExecutor(1139): Stored pid=41, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } 2024-11-22T06:02:43,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }, snapshot procedure id = 41 2024-11-22T06:02:43,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=41 2024-11-22T06:02:43,109 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_PREPARE 2024-11-22T06:02:43,111 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-22T06:02:43,115 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-22T06:02:43,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741909_1085 (size=143) 2024-11-22T06:02:43,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741909_1085 (size=143) 2024-11-22T06:02:43,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741909_1085 (size=143) 2024-11-22T06:02:43,150 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-22T06:02:43,150 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=42, ppid=41, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 580aaa30dff978e83d0e605a1fe4c9be}, {pid=43, ppid=41, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 915eae74ee43cf48f7325fae2d97b580}] 2024-11-22T06:02:43,153 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for 
pid=43, ppid=41, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 915eae74ee43cf48f7325fae2d97b580 2024-11-22T06:02:43,153 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=42, ppid=41, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 580aaa30dff978e83d0e605a1fe4c9be 2024-11-22T06:02:43,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=41 2024-11-22T06:02:43,305 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41799 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=43 2024-11-22T06:02:43,305 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35283 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=42 2024-11-22T06:02:43,306 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportWithResetTtl,,1732255362381.580aaa30dff978e83d0e605a1fe4c9be. 2024-11-22T06:02:43,306 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportWithResetTtl,1,1732255362381.915eae74ee43cf48f7325fae2d97b580. 2024-11-22T06:02:43,306 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.HRegion(2902): Flushing 580aaa30dff978e83d0e605a1fe4c9be 1/1 column families, dataSize=266 B heapSize=832 B 2024-11-22T06:02:43,306 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.HRegion(2902): Flushing 915eae74ee43cf48f7325fae2d97b580 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-11-22T06:02:43,330 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testExportWithResetTtl/580aaa30dff978e83d0e605a1fe4c9be/.tmp/cf/d72cac99c8b948b89bc980dc50fd33cd is 71, key is 02985a58ee710fb033e1637cf5943bd5/cf:q/1732255363045/Put/seqid=0 2024-11-22T06:02:43,330 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testExportWithResetTtl/915eae74ee43cf48f7325fae2d97b580/.tmp/cf/e2f52d3535074cf78441d2d150303072 is 71, key is 1018b61a057d43045e64f2d641a7b873/cf:q/1732255363048/Put/seqid=0 2024-11-22T06:02:43,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741911_1087 (size=8256) 2024-11-22T06:02:43,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741911_1087 (size=8256) 2024-11-22T06:02:43,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741911_1087 (size=8256) 2024-11-22T06:02:43,371 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.00 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testExportWithResetTtl/915eae74ee43cf48f7325fae2d97b580/.tmp/cf/e2f52d3535074cf78441d2d150303072 2024-11-22T06:02:43,386 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testExportWithResetTtl/915eae74ee43cf48f7325fae2d97b580/.tmp/cf/e2f52d3535074cf78441d2d150303072 as hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testExportWithResetTtl/915eae74ee43cf48f7325fae2d97b580/cf/e2f52d3535074cf78441d2d150303072 2024-11-22T06:02:43,398 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testExportWithResetTtl/915eae74ee43cf48f7325fae2d97b580/cf/e2f52d3535074cf78441d2d150303072, entries=46, sequenceid=5, filesize=8.1 K 2024-11-22T06:02:43,399 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.HRegion(3140): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for 915eae74ee43cf48f7325fae2d97b580 in 93ms, sequenceid=5, compaction requested=false 2024-11-22T06:02:43,399 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportWithResetTtl' 2024-11-22T06:02:43,400 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.HRegion(2603): Flush status journal for 915eae74ee43cf48f7325fae2d97b580: 2024-11-22T06:02:43,400 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportWithResetTtl,1,1732255362381.915eae74ee43cf48f7325fae2d97b580. for snaptb-testExportWithResetTtl completed. 2024-11-22T06:02:43,400 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] snapshot.SnapshotManifest(241): Storing 'testExportWithResetTtl,1,1732255362381.915eae74ee43cf48f7325fae2d97b580.' 
region-info for snapshot=snaptb-testExportWithResetTtl 2024-11-22T06:02:43,400 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-22T06:02:43,401 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testExportWithResetTtl/915eae74ee43cf48f7325fae2d97b580/cf/e2f52d3535074cf78441d2d150303072] hfiles 2024-11-22T06:02:43,401 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testExportWithResetTtl/915eae74ee43cf48f7325fae2d97b580/cf/e2f52d3535074cf78441d2d150303072 for snapshot=snaptb-testExportWithResetTtl 2024-11-22T06:02:43,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741910_1086 (size=5356) 2024-11-22T06:02:43,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741910_1086 (size=5356) 2024-11-22T06:02:43,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741910_1086 (size=5356) 2024-11-22T06:02:43,408 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=266 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testExportWithResetTtl/580aaa30dff978e83d0e605a1fe4c9be/.tmp/cf/d72cac99c8b948b89bc980dc50fd33cd 2024-11-22T06:02:43,421 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testExportWithResetTtl/580aaa30dff978e83d0e605a1fe4c9be/.tmp/cf/d72cac99c8b948b89bc980dc50fd33cd as hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testExportWithResetTtl/580aaa30dff978e83d0e605a1fe4c9be/cf/d72cac99c8b948b89bc980dc50fd33cd 2024-11-22T06:02:43,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=41 2024-11-22T06:02:43,432 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testExportWithResetTtl/580aaa30dff978e83d0e605a1fe4c9be/cf/d72cac99c8b948b89bc980dc50fd33cd, entries=4, sequenceid=5, filesize=5.2 K 2024-11-22T06:02:43,434 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.HRegion(3140): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for 580aaa30dff978e83d0e605a1fe4c9be in 128ms, sequenceid=5, compaction requested=false 2024-11-22T06:02:43,434 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 
{event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.HRegion(2603): Flush status journal for 580aaa30dff978e83d0e605a1fe4c9be: 2024-11-22T06:02:43,434 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportWithResetTtl,,1732255362381.580aaa30dff978e83d0e605a1fe4c9be. for snaptb-testExportWithResetTtl completed. 2024-11-22T06:02:43,434 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] snapshot.SnapshotManifest(241): Storing 'testExportWithResetTtl,,1732255362381.580aaa30dff978e83d0e605a1fe4c9be.' region-info for snapshot=snaptb-testExportWithResetTtl 2024-11-22T06:02:43,434 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-22T06:02:43,435 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testExportWithResetTtl/580aaa30dff978e83d0e605a1fe4c9be/cf/d72cac99c8b948b89bc980dc50fd33cd] hfiles 2024-11-22T06:02:43,435 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testExportWithResetTtl/580aaa30dff978e83d0e605a1fe4c9be/cf/d72cac99c8b948b89bc980dc50fd33cd for snapshot=snaptb-testExportWithResetTtl 2024-11-22T06:02:43,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741912_1088 (size=100) 2024-11-22T06:02:43,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741912_1088 (size=100) 2024-11-22T06:02:43,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741912_1088 (size=100) 2024-11-22T06:02:43,468 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportWithResetTtl,1,1732255362381.915eae74ee43cf48f7325fae2d97b580. 
2024-11-22T06:02:43,468 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=43 2024-11-22T06:02:43,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.HMaster(4169): Remote procedure done, pid=43 2024-11-22T06:02:43,469 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb-testExportWithResetTtl on region 915eae74ee43cf48f7325fae2d97b580 2024-11-22T06:02:43,469 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=43, ppid=41, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 915eae74ee43cf48f7325fae2d97b580 2024-11-22T06:02:43,485 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=43, ppid=41, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 915eae74ee43cf48f7325fae2d97b580 in 322 msec 2024-11-22T06:02:43,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741913_1089 (size=100) 2024-11-22T06:02:43,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741913_1089 (size=100) 2024-11-22T06:02:43,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741913_1089 (size=100) 2024-11-22T06:02:43,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=41 2024-11-22T06:02:43,908 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportWithResetTtl,,1732255362381.580aaa30dff978e83d0e605a1fe4c9be. 
2024-11-22T06:02:43,908 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=42 2024-11-22T06:02:43,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.HMaster(4169): Remote procedure done, pid=42 2024-11-22T06:02:43,909 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb-testExportWithResetTtl on region 580aaa30dff978e83d0e605a1fe4c9be 2024-11-22T06:02:43,910 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=42, ppid=41, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 580aaa30dff978e83d0e605a1fe4c9be 2024-11-22T06:02:43,920 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=42, resume processing ppid=41 2024-11-22T06:02:43,920 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=42, ppid=41, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 580aaa30dff978e83d0e605a1fe4c9be in 761 msec 2024-11-22T06:02:43,920 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-22T06:02:43,923 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-22T06:02:43,924 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-22T06:02:43,924 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb-testExportWithResetTtl 2024-11-22T06:02:43,926 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-11-22T06:02:43,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741914_1090 (size=600) 2024-11-22T06:02:43,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741914_1090 (size=600) 2024-11-22T06:02:43,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741914_1090 (size=600) 2024-11-22T06:02:43,977 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-22T06:02:43,989 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=41, 
state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-22T06:02:43,990 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl to hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-11-22T06:02:43,992 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_POST_OPERATION 2024-11-22T06:02:43,992 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }, snapshot procedure id = 41 2024-11-22T06:02:43,993 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=41, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } in 887 msec 2024-11-22T06:02:44,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=41 2024-11-22T06:02:44,250 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testExportWithResetTtl completed 2024-11-22T06:02:44,265 INFO [Time-limited test {}] snapshot.TestExportSnapshot(467): HDFS export destination path: hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/export-test/export-1732255364265 2024-11-22T06:02:44,265 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=hdfs://localhost:33847, tgtDir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/export-test/export-1732255364265, rawTgtDir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/export-test/export-1732255364265, srcFsUri=hdfs://localhost:33847, srcDir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788 2024-11-22T06:02:44,316 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:33847, inputRoot=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788 2024-11-22T06:02:44,316 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-2145612548_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/export-test/export-1732255364265, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/export-test/export-1732255364265/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-11-22T06:02:44,319 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 
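At this point the snapshot procedure (pid=41) has completed and ExportSnapshot begins verifying the source snapshot and copying the manifest to the export destination. For reference, a hypothetical driver for the same export (snapshot name from the log; the destination path, mapper count, and class name are illustrative assumptions, and exact option names can vary by HBase version) could look like this; the equivalent CLI form is the documented `hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot -snapshot <name> -copy-to <hdfs-uri>` invocation.

// Hypothetical sketch: running ExportSnapshot programmatically via ToolRunner.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb-testExportWithResetTtl",
        // Illustrative target; the test exports under export-test/export-<ts>.
        "-copy-to", "hdfs://localhost:33847/user/jenkins/export-dest",
        "-mappers", "1"
    });
    System.exit(rc);
  }
}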
2024-11-22T06:02:44,324 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/snaptb-testExportWithResetTtl to hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/export-test/export-1732255364265/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-11-22T06:02:44,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741915_1091 (size=143) 2024-11-22T06:02:44,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741915_1091 (size=143) 2024-11-22T06:02:44,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741915_1091 (size=143) 2024-11-22T06:02:44,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741916_1092 (size=600) 2024-11-22T06:02:44,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741916_1092 (size=600) 2024-11-22T06:02:44,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741916_1092 (size=600) 2024-11-22T06:02:44,729 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732255328437_0001_000001 (auth:SIMPLE) from 127.0.0.1:38524 2024-11-22T06:02:44,730 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1754703613/yarn-5946224054/MiniMRCluster_1754703613-localDir-nm-0_3/usercache/jenkins/appcache/application_1732255328437_0001/container_1732255328437_0001_01_000001/launch_container.sh] 2024-11-22T06:02:44,730 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1754703613/yarn-5946224054/MiniMRCluster_1754703613-localDir-nm-0_3/usercache/jenkins/appcache/application_1732255328437_0001/container_1732255328437_0001_01_000001/container_tokens] 2024-11-22T06:02:44,730 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1754703613/yarn-5946224054/MiniMRCluster_1754703613-localDir-nm-0_3/usercache/jenkins/appcache/application_1732255328437_0001/container_1732255328437_0001_01_000001/sysfs] 2024-11-22T06:02:44,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741917_1093 (size=141) 2024-11-22T06:02:44,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741917_1093 (size=141) 2024-11-22T06:02:44,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741917_1093 (size=141) 2024-11-22T06:02:44,830 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:02:44,830 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:02:44,830 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:02:45,844 INFO [master/407411ea95db:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-22T06:02:45,844 INFO [master/407411ea95db:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-22T06:02:45,889 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-22T06:02:45,916 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/hadoop-5176768466148409716.jar 2024-11-22T06:02:45,916 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:02:45,917 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:02:46,000 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/hadoop-13381862217510694094.jar 2024-11-22T06:02:46,000 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:02:46,001 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:02:46,001 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:02:46,001 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:02:46,002 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:02:46,002 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:02:46,002 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-22T06:02:46,003 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-22T06:02:46,003 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-22T06:02:46,003 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-22T06:02:46,004 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-22T06:02:46,004 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-22T06:02:46,004 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-22T06:02:46,005 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-22T06:02:46,005 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-22T06:02:46,005 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-22T06:02:46,006 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-22T06:02:46,006 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-22T06:02:46,006 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-22T06:02:46,007 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-22T06:02:46,007 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-22T06:02:46,007 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-22T06:02:46,008 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-22T06:02:46,008 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-22T06:02:46,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741918_1094 (size=131440) 2024-11-22T06:02:46,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741918_1094 (size=131440) 2024-11-22T06:02:46,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741918_1094 (size=131440) 2024-11-22T06:02:46,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741919_1095 (size=4188619) 2024-11-22T06:02:46,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:35893 is added to blk_1073741919_1095 (size=4188619) 2024-11-22T06:02:46,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741919_1095 (size=4188619) 2024-11-22T06:02:46,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741920_1096 (size=1323991) 2024-11-22T06:02:46,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741920_1096 (size=1323991) 2024-11-22T06:02:46,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741920_1096 (size=1323991) 2024-11-22T06:02:46,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741921_1097 (size=903736) 2024-11-22T06:02:46,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741921_1097 (size=903736) 2024-11-22T06:02:46,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741921_1097 (size=903736) 2024-11-22T06:02:46,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741922_1098 (size=8360083) 2024-11-22T06:02:46,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741922_1098 (size=8360083) 2024-11-22T06:02:46,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741922_1098 (size=8360083) 2024-11-22T06:02:46,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741923_1099 (size=1877034) 2024-11-22T06:02:46,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741923_1099 (size=1877034) 2024-11-22T06:02:46,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741923_1099 (size=1877034) 2024-11-22T06:02:46,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741924_1100 (size=6424744) 2024-11-22T06:02:46,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741924_1100 (size=6424744) 2024-11-22T06:02:46,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741924_1100 (size=6424744) 2024-11-22T06:02:46,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741925_1101 (size=77835) 2024-11-22T06:02:46,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741925_1101 (size=77835) 2024-11-22T06:02:46,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741925_1101 (size=77835) 2024-11-22T06:02:46,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:35893 is added to blk_1073741926_1102 (size=30949) 2024-11-22T06:02:46,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741926_1102 (size=30949) 2024-11-22T06:02:46,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741926_1102 (size=30949) 2024-11-22T06:02:46,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741927_1103 (size=1597347) 2024-11-22T06:02:46,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741927_1103 (size=1597347) 2024-11-22T06:02:46,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741927_1103 (size=1597347) 2024-11-22T06:02:46,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741928_1104 (size=4695811) 2024-11-22T06:02:46,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741928_1104 (size=4695811) 2024-11-22T06:02:46,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741928_1104 (size=4695811) 2024-11-22T06:02:46,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741929_1105 (size=232957) 2024-11-22T06:02:46,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741929_1105 (size=232957) 2024-11-22T06:02:46,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741929_1105 (size=232957) 2024-11-22T06:02:46,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741930_1106 (size=127628) 2024-11-22T06:02:46,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741930_1106 (size=127628) 2024-11-22T06:02:46,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741930_1106 (size=127628) 2024-11-22T06:02:46,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741931_1107 (size=20406) 2024-11-22T06:02:46,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741931_1107 (size=20406) 2024-11-22T06:02:46,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741931_1107 (size=20406) 2024-11-22T06:02:46,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741932_1108 (size=5175431) 2024-11-22T06:02:46,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741932_1108 (size=5175431) 2024-11-22T06:02:46,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741932_1108 (size=5175431) 2024-11-22T06:02:46,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741933_1109 (size=217634) 2024-11-22T06:02:46,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741933_1109 (size=217634) 2024-11-22T06:02:46,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741933_1109 (size=217634) 2024-11-22T06:02:47,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741934_1110 (size=1832290) 2024-11-22T06:02:47,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741934_1110 (size=1832290) 2024-11-22T06:02:47,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741934_1110 (size=1832290) 2024-11-22T06:02:47,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741935_1111 (size=322274) 2024-11-22T06:02:47,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741935_1111 (size=322274) 2024-11-22T06:02:47,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741935_1111 (size=322274) 2024-11-22T06:02:47,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741936_1112 (size=440656) 2024-11-22T06:02:47,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741936_1112 (size=440656) 2024-11-22T06:02:47,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741936_1112 (size=440656) 2024-11-22T06:02:47,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741937_1113 (size=503880) 2024-11-22T06:02:47,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741937_1113 (size=503880) 2024-11-22T06:02:47,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741937_1113 (size=503880) 2024-11-22T06:02:47,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741938_1114 (size=29229) 2024-11-22T06:02:47,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741938_1114 (size=29229) 2024-11-22T06:02:47,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741938_1114 (size=29229) 2024-11-22T06:02:47,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741939_1115 (size=24096) 2024-11-22T06:02:47,105 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741939_1115 (size=24096) 2024-11-22T06:02:47,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741939_1115 (size=24096) 2024-11-22T06:02:47,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741940_1116 (size=111872) 2024-11-22T06:02:47,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741940_1116 (size=111872) 2024-11-22T06:02:47,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741940_1116 (size=111872) 2024-11-22T06:02:47,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741941_1117 (size=45609) 2024-11-22T06:02:47,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741941_1117 (size=45609) 2024-11-22T06:02:47,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741941_1117 (size=45609) 2024-11-22T06:02:47,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741942_1118 (size=136454) 2024-11-22T06:02:47,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741942_1118 (size=136454) 2024-11-22T06:02:47,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741942_1118 (size=136454) 2024-11-22T06:02:47,193 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
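The TableMapReduceUtil(972) DEBUG entries above show addDependencyJars resolving, for each class the export job depends on, the jar that contains it (module SNAPSHOT jars or artifacts from the local repository); the addStoredBlock entries that follow are consistent with those jars being copied into the job's HDFS staging area, and the JobResourceUploader warning refers to the separate job jar set via Job#setJar or Job#setJarByClass. A minimal sketch of the client-side setup that produces this kind of output, assuming a placeholder mapper and output format that are not part of the test:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.hbase.mapreduce.TableMapper;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;

    public class ExportJobSetup {
      // Placeholder mapper; a real job would emit something useful.
      static class NoOpMapper extends TableMapper<ImmutableBytesWritable, Result> {
        @Override
        protected void map(ImmutableBytesWritable key, Result value, Context ctx) {
          // intentionally empty
        }
      }

      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Job job = Job.getInstance(conf, "table-scan-job");
        // Setting the job jar avoids the "No job jar file set" warning seen above.
        job.setJarByClass(ExportJobSetup.class);
        // initTableMapperJob wires the scan and mapper and, by default, calls
        // addDependencyJars(), which logs the "For class X, using jar Y" lines.
        TableMapReduceUtil.initTableMapperJob(
            "testtb-testExportWithResetTtl",   // table name from the test, used only as an example
            new Scan(), NoOpMapper.class,
            ImmutableBytesWritable.class, Result.class, job);
        job.setOutputFormatClass(NullOutputFormat.class);
        // Redundant after initTableMapperJob, but shows the API behind the DEBUG lines.
        TableMapReduceUtil.addDependencyJars(job);
        System.exit(job.waitForCompletion(true) ? 0 : 1);
      }
    }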
2024-11-22T06:02:47,196 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'snaptb-testExportWithResetTtl' hfile list 2024-11-22T06:02:47,199 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(754): export split=0 size=13.3 K 2024-11-22T06:02:47,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741943_1119 (size=324) 2024-11-22T06:02:47,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741943_1119 (size=324) 2024-11-22T06:02:47,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741943_1119 (size=324) 2024-11-22T06:02:47,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741944_1120 (size=15) 2024-11-22T06:02:47,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741944_1120 (size=15) 2024-11-22T06:02:47,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741944_1120 (size=15) 2024-11-22T06:02:47,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741945_1121 (size=303728) 2024-11-22T06:02:47,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741945_1121 (size=303728) 2024-11-22T06:02:47,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741945_1121 (size=303728) 2024-11-22T06:02:47,314 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-22T06:02:47,314 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-22T06:02:47,722 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732255328437_0002_000001 (auth:SIMPLE) from 127.0.0.1:38538 2024-11-22T06:02:51,126 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl 2024-11-22T06:02:51,126 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl Metrics about Tables on a single HBase RegionServer 2024-11-22T06:02:53,923 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732255328437_0002_000001 (auth:SIMPLE) from 127.0.0.1:40734 2024-11-22T06:02:54,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741946_1122 (size=349378) 2024-11-22T06:02:54,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741946_1122 (size=349378) 2024-11-22T06:02:54,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741946_1122 (size=349378) 2024-11-22T06:02:56,138 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732255328437_0002_000001 (auth:SIMPLE) from 127.0.0.1:45682 2024-11-22T06:02:59,720 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
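The ExportSnapshot(661)/(754) entries at 06:02:47 show the export tool loading the snapshot's hfile list and grouping it into splits (a single 13.3 K split here) before submitting the MapReduce copy job that the later "Auth successful for appattempt_..." entries belong to. A hedged sketch of driving the same tool through ToolRunner; the destination URI and mapper count are placeholders, and the option names should be checked against the ExportSnapshot usage text for the HBase version in use:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class RunExportSnapshot {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Roughly equivalent to:
        //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
        //     -snapshot snaptb-testExportWithResetTtl -copy-to hdfs://backup:8020/hbase
        // Only the snapshot name comes from the test; the destination is made up.
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb-testExportWithResetTtl",
            "-copy-to", "hdfs://backup-cluster:8020/hbase",
            "-mappers", "1"
        });
        System.exit(rc);
      }
    }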
2024-11-22T06:02:59,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741947_1123 (size=8256) 2024-11-22T06:02:59,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741947_1123 (size=8256) 2024-11-22T06:02:59,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741947_1123 (size=8256) 2024-11-22T06:02:59,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741948_1124 (size=5356) 2024-11-22T06:02:59,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741948_1124 (size=5356) 2024-11-22T06:02:59,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741948_1124 (size=5356) 2024-11-22T06:03:00,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741949_1125 (size=17398) 2024-11-22T06:03:00,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741949_1125 (size=17398) 2024-11-22T06:03:00,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741949_1125 (size=17398) 2024-11-22T06:03:00,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741950_1126 (size=461) 2024-11-22T06:03:00,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741950_1126 (size=461) 2024-11-22T06:03:00,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741950_1126 (size=461) 2024-11-22T06:03:00,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741951_1127 (size=17398) 2024-11-22T06:03:00,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741951_1127 (size=17398) 2024-11-22T06:03:00,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741951_1127 (size=17398) 2024-11-22T06:03:00,079 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1754703613/yarn-5946224054/MiniMRCluster_1754703613-localDir-nm-1_3/usercache/jenkins/appcache/application_1732255328437_0002/container_1732255328437_0002_01_000002/launch_container.sh] 2024-11-22T06:03:00,080 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1754703613/yarn-5946224054/MiniMRCluster_1754703613-localDir-nm-1_3/usercache/jenkins/appcache/application_1732255328437_0002/container_1732255328437_0002_01_000002/container_tokens] 2024-11-22T06:03:00,080 WARN [ContainersLauncher #0 {}] 
nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1754703613/yarn-5946224054/MiniMRCluster_1754703613-localDir-nm-1_3/usercache/jenkins/appcache/application_1732255328437_0002/container_1732255328437_0002_01_000002/sysfs] 2024-11-22T06:03:00,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741952_1128 (size=349378) 2024-11-22T06:03:00,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741952_1128 (size=349378) 2024-11-22T06:03:00,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741952_1128 (size=349378) 2024-11-22T06:03:00,104 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732255328437_0002_000001 (auth:SIMPLE) from 127.0.0.1:45686 2024-11-22T06:03:01,504 INFO [Time-limited test {}] snapshot.ExportSnapshot(1210): Finalize the Snapshot Export 2024-11-22T06:03:01,504 INFO [Time-limited test {}] snapshot.ExportSnapshot(1221): Verify the exported snapshot's expiration status and integrity. 2024-11-22T06:03:01,515 INFO [Time-limited test {}] snapshot.ExportSnapshot(1227): Export Completed: snaptb-testExportWithResetTtl 2024-11-22T06:03:01,515 INFO [Time-limited test {}] snapshot.TestExportSnapshot(361): Exported snapshot 2024-11-22T06:03:01,516 INFO [Time-limited test {}] snapshot.TestExportSnapshot(372): Verified filesystem state 2024-11-22T06:03:01,516 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-2145612548_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/snaptb-testExportWithResetTtl at hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-11-22T06:03:01,516 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/snaptb-testExportWithResetTtl/.snapshotinfo 2024-11-22T06:03:01,516 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/snaptb-testExportWithResetTtl/data.manifest 2024-11-22T06:03:01,516 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-2145612548_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/export-test/export-1732255364265/.hbase-snapshot/snaptb-testExportWithResetTtl at hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/export-test/export-1732255364265/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-11-22T06:03:01,517 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/export-test/export-1732255364265/.hbase-snapshot/snaptb-testExportWithResetTtl/.snapshotinfo 2024-11-22T06:03:01,517 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): 
hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/export-test/export-1732255364265/.hbase-snapshot/snaptb-testExportWithResetTtl/data.manifest 2024-11-22T06:03:01,527 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testExportWithResetTtl 2024-11-22T06:03:01,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] procedure2.ProcedureExecutor(1139): Stored pid=44, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testExportWithResetTtl 2024-11-22T06:03:01,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=44 2024-11-22T06:03:01,535 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732255381535"}]},"ts":"1732255381535"} 2024-11-22T06:03:01,542 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=DISABLING in hbase:meta 2024-11-22T06:03:01,542 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testExportWithResetTtl to state=DISABLING 2024-11-22T06:03:01,544 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=45, ppid=44, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testExportWithResetTtl}] 2024-11-22T06:03:01,547 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=580aaa30dff978e83d0e605a1fe4c9be, UNASSIGN}, {pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=915eae74ee43cf48f7325fae2d97b580, UNASSIGN}] 2024-11-22T06:03:01,549 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=580aaa30dff978e83d0e605a1fe4c9be, UNASSIGN 2024-11-22T06:03:01,550 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=915eae74ee43cf48f7325fae2d97b580, UNASSIGN 2024-11-22T06:03:01,551 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=46 updating hbase:meta row=580aaa30dff978e83d0e605a1fe4c9be, regionState=CLOSING, regionLocation=407411ea95db,35283,1732255321784 2024-11-22T06:03:01,552 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=47 updating hbase:meta row=915eae74ee43cf48f7325fae2d97b580, regionState=CLOSING, regionLocation=407411ea95db,41799,1732255321718 2024-11-22T06:03:01,554 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=580aaa30dff978e83d0e605a1fe4c9be, UNASSIGN because future has completed 2024-11-22T06:03:01,554 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 
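The TestExportSnapshot(447)/(452) entries just above list the exported layout and confirm that both the source snapshot directory and the export target contain the .snapshotinfo descriptor and data.manifest. A small sketch of the same check done directly against HDFS, reusing the NameNode URI and export path from the log (both environment-specific):

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class VerifyExportedSnapshot {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // NameNode URI and export root copied from the log above.
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:33847"), conf);
        Path exportRoot = new Path("/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/"
            + "export-test/export-1732255364265");
        Path snapshotDir = new Path(exportRoot, ".hbase-snapshot/snaptb-testExportWithResetTtl");
        // The two files the test looks for: snapshot descriptor and manifest.
        for (String name : new String[] { ".snapshotinfo", "data.manifest" }) {
          Path p = new Path(snapshotDir, name);
          System.out.println(p + " exists=" + fs.exists(p));
        }
      }
    }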
2024-11-22T06:03:01,554 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=48, ppid=46, state=RUNNABLE, hasLock=false; CloseRegionProcedure 580aaa30dff978e83d0e605a1fe4c9be, server=407411ea95db,35283,1732255321784}] 2024-11-22T06:03:01,556 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=915eae74ee43cf48f7325fae2d97b580, UNASSIGN because future has completed 2024-11-22T06:03:01,556 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-22T06:03:01,556 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=49, ppid=47, state=RUNNABLE, hasLock=false; CloseRegionProcedure 915eae74ee43cf48f7325fae2d97b580, server=407411ea95db,41799,1732255321718}] 2024-11-22T06:03:01,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=44 2024-11-22T06:03:01,709 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] handler.UnassignRegionHandler(122): Close 580aaa30dff978e83d0e605a1fe4c9be 2024-11-22T06:03:01,709 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-22T06:03:01,709 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] regionserver.HRegion(1722): Closing 580aaa30dff978e83d0e605a1fe4c9be, disabling compactions & flushes 2024-11-22T06:03:01,709 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,,1732255362381.580aaa30dff978e83d0e605a1fe4c9be. 2024-11-22T06:03:01,709 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,,1732255362381.580aaa30dff978e83d0e605a1fe4c9be. 2024-11-22T06:03:01,709 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,,1732255362381.580aaa30dff978e83d0e605a1fe4c9be. after waiting 0 ms 2024-11-22T06:03:01,709 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,,1732255362381.580aaa30dff978e83d0e605a1fe4c9be. 
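Starting at 06:03:01,527 the log traces the master-side procedure chain for disabling the exported table: DisableTableProcedure (pid=44) spawns CloseTableRegionsProcedure (pid=45), which spawns one TransitRegionStateProcedure per region (pids 46 and 47), each dispatching a CloseRegionProcedure (pids 48 and 49) to the hosting region server, while the client polls MasterRpcServices until pid=44 is done. A minimal client-side sketch of the call that drives this chain; connection settings are whatever the test configuration supplies:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DisableTableExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          TableName table = TableName.valueOf("testExportWithResetTtl");
          if (admin.isTableEnabled(table)) {
            // Blocks until the DisableTableProcedure chain seen in the log
            // (close regions, mark the table DISABLED in hbase:meta) has finished.
            admin.disableTable(table);
          }
        }
      }
    }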
2024-11-22T06:03:01,715 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testExportWithResetTtl/580aaa30dff978e83d0e605a1fe4c9be/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-22T06:03:01,716 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-22T06:03:01,716 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] regionserver.HRegion(1973): Closed testExportWithResetTtl,,1732255362381.580aaa30dff978e83d0e605a1fe4c9be. 2024-11-22T06:03:01,716 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] regionserver.HRegion(1676): Region close journal for 580aaa30dff978e83d0e605a1fe4c9be: Waiting for close lock at 1732255381709Running coprocessor pre-close hooks at 1732255381709Disabling compacts and flushes for region at 1732255381709Disabling writes for close at 1732255381709Writing region close event to WAL at 1732255381710 (+1 ms)Running coprocessor post-close hooks at 1732255381716 (+6 ms)Closed at 1732255381716 2024-11-22T06:03:01,717 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] handler.UnassignRegionHandler(122): Close 915eae74ee43cf48f7325fae2d97b580 2024-11-22T06:03:01,718 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-22T06:03:01,718 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] regionserver.HRegion(1722): Closing 915eae74ee43cf48f7325fae2d97b580, disabling compactions & flushes 2024-11-22T06:03:01,718 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,1,1732255362381.915eae74ee43cf48f7325fae2d97b580. 2024-11-22T06:03:01,718 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,1,1732255362381.915eae74ee43cf48f7325fae2d97b580. 2024-11-22T06:03:01,718 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,1,1732255362381.915eae74ee43cf48f7325fae2d97b580. after waiting 0 ms 2024-11-22T06:03:01,718 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,1,1732255362381.915eae74ee43cf48f7325fae2d97b580. 
2024-11-22T06:03:01,719 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] handler.UnassignRegionHandler(157): Closed 580aaa30dff978e83d0e605a1fe4c9be 2024-11-22T06:03:01,720 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=46 updating hbase:meta row=580aaa30dff978e83d0e605a1fe4c9be, regionState=CLOSED 2024-11-22T06:03:01,722 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=48, ppid=46, state=RUNNABLE, hasLock=false; CloseRegionProcedure 580aaa30dff978e83d0e605a1fe4c9be, server=407411ea95db,35283,1732255321784 because future has completed 2024-11-22T06:03:01,725 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=48, resume processing ppid=46 2024-11-22T06:03:01,725 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=48, ppid=46, state=SUCCESS, hasLock=false; CloseRegionProcedure 580aaa30dff978e83d0e605a1fe4c9be, server=407411ea95db,35283,1732255321784 in 169 msec 2024-11-22T06:03:01,726 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=46, ppid=45, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=580aaa30dff978e83d0e605a1fe4c9be, UNASSIGN in 178 msec 2024-11-22T06:03:01,734 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testExportWithResetTtl/915eae74ee43cf48f7325fae2d97b580/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-22T06:03:01,734 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-22T06:03:01,735 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] regionserver.HRegion(1973): Closed testExportWithResetTtl,1,1732255362381.915eae74ee43cf48f7325fae2d97b580. 
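On the region-server side each close writes a recovered.edits/<n>.seqid marker and a close journal before the master records the region as CLOSED; once the table is fully offline, the DeleteTableProcedure entries below move its store files under the archive directory (HFileArchiver) rather than deleting them. A hedged sketch of the matching client call plus a look at the archive layout seen below; the /hbase root is a placeholder for whatever hbase.rootdir the cluster uses:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DeleteTableExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("testExportWithResetTtl");
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // The table must already be disabled (see the previous sketch).
          admin.deleteTable(table);   // drives the DeleteTableProcedure logged below
        }
        // Store files end up under <hbase.rootdir>/archive/data/<namespace>/<table>,
        // matching the "Archived from ... to ..." entries; the root here is assumed.
        FileSystem fs = FileSystem.get(conf);
        Path archived = new Path("/hbase/archive/data/default/" + table.getNameAsString());
        if (fs.exists(archived)) {
          for (FileStatus status : fs.listStatus(archived)) {
            System.out.println("archived region dir: " + status.getPath());
          }
        }
      }
    }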
2024-11-22T06:03:01,735 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] regionserver.HRegion(1676): Region close journal for 915eae74ee43cf48f7325fae2d97b580: Waiting for close lock at 1732255381718Running coprocessor pre-close hooks at 1732255381718Disabling compacts and flushes for region at 1732255381718Disabling writes for close at 1732255381718Writing region close event to WAL at 1732255381729 (+11 ms)Running coprocessor post-close hooks at 1732255381734 (+5 ms)Closed at 1732255381735 (+1 ms) 2024-11-22T06:03:01,737 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] handler.UnassignRegionHandler(157): Closed 915eae74ee43cf48f7325fae2d97b580 2024-11-22T06:03:01,738 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=47 updating hbase:meta row=915eae74ee43cf48f7325fae2d97b580, regionState=CLOSED 2024-11-22T06:03:01,740 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=49, ppid=47, state=RUNNABLE, hasLock=false; CloseRegionProcedure 915eae74ee43cf48f7325fae2d97b580, server=407411ea95db,41799,1732255321718 because future has completed 2024-11-22T06:03:01,743 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=49, resume processing ppid=47 2024-11-22T06:03:01,743 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=49, ppid=47, state=SUCCESS, hasLock=false; CloseRegionProcedure 915eae74ee43cf48f7325fae2d97b580, server=407411ea95db,41799,1732255321718 in 185 msec 2024-11-22T06:03:01,745 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=47, resume processing ppid=45 2024-11-22T06:03:01,745 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=47, ppid=45, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=915eae74ee43cf48f7325fae2d97b580, UNASSIGN in 196 msec 2024-11-22T06:03:01,747 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=45, resume processing ppid=44 2024-11-22T06:03:01,747 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=45, ppid=44, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testExportWithResetTtl in 201 msec 2024-11-22T06:03:01,750 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732255381750"}]},"ts":"1732255381750"} 2024-11-22T06:03:01,752 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=DISABLED in hbase:meta 2024-11-22T06:03:01,752 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testExportWithResetTtl to state=DISABLED 2024-11-22T06:03:01,754 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=44, state=SUCCESS, hasLock=false; DisableTableProcedure table=testExportWithResetTtl in 226 msec 2024-11-22T06:03:01,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=44 2024-11-22T06:03:01,850 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testExportWithResetTtl completed 2024-11-22T06:03:01,852 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 
delete testExportWithResetTtl 2024-11-22T06:03:01,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] procedure2.ProcedureExecutor(1139): Stored pid=50, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testExportWithResetTtl 2024-11-22T06:03:01,856 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=50, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-11-22T06:03:01,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] access.PermissionStorage(261): Removing permissions of removed table testExportWithResetTtl 2024-11-22T06:03:01,858 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=50, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-11-22T06:03:01,861 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41799 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testExportWithResetTtl 2024-11-22T06:03:01,863 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testExportWithResetTtl/580aaa30dff978e83d0e605a1fe4c9be 2024-11-22T06:03:01,863 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testExportWithResetTtl/915eae74ee43cf48f7325fae2d97b580 2024-11-22T06:03:01,865 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testExportWithResetTtl/915eae74ee43cf48f7325fae2d97b580/cf, FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testExportWithResetTtl/915eae74ee43cf48f7325fae2d97b580/recovered.edits] 2024-11-22T06:03:01,865 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testExportWithResetTtl/580aaa30dff978e83d0e605a1fe4c9be/cf, FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testExportWithResetTtl/580aaa30dff978e83d0e605a1fe4c9be/recovered.edits] 2024-11-22T06:03:01,870 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testExportWithResetTtl/915eae74ee43cf48f7325fae2d97b580/cf/e2f52d3535074cf78441d2d150303072 to hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/archive/data/default/testExportWithResetTtl/915eae74ee43cf48f7325fae2d97b580/cf/e2f52d3535074cf78441d2d150303072 2024-11-22T06:03:01,870 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testExportWithResetTtl/580aaa30dff978e83d0e605a1fe4c9be/cf/d72cac99c8b948b89bc980dc50fd33cd to hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/archive/data/default/testExportWithResetTtl/580aaa30dff978e83d0e605a1fe4c9be/cf/d72cac99c8b948b89bc980dc50fd33cd 2024-11-22T06:03:01,873 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): 
Archived from FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testExportWithResetTtl/915eae74ee43cf48f7325fae2d97b580/recovered.edits/8.seqid to hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/archive/data/default/testExportWithResetTtl/915eae74ee43cf48f7325fae2d97b580/recovered.edits/8.seqid 2024-11-22T06:03:01,873 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testExportWithResetTtl/580aaa30dff978e83d0e605a1fe4c9be/recovered.edits/8.seqid to hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/archive/data/default/testExportWithResetTtl/580aaa30dff978e83d0e605a1fe4c9be/recovered.edits/8.seqid 2024-11-22T06:03:01,873 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testExportWithResetTtl/915eae74ee43cf48f7325fae2d97b580 2024-11-22T06:03:01,873 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testExportWithResetTtl/580aaa30dff978e83d0e605a1fe4c9be 2024-11-22T06:03:01,873 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testExportWithResetTtl regions 2024-11-22T06:03:01,876 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=50, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-11-22T06:03:01,879 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testExportWithResetTtl from hbase:meta 2024-11-22T06:03:01,889 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-22T06:03:01,889 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35283-0x101626c19a90003, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-22T06:03:01,889 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44619-0x101626c19a90001, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-22T06:03:01,889 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41799-0x101626c19a90002, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-22T06:03:01,890 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-11-22T06:03:01,890 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-11-22T06:03:01,890 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-11-22T06:03:01,890 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): 
Updating permissions cache from testExportWithResetTtl with data PBUF 2024-11-22T06:03:01,891 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'testExportWithResetTtl' descriptor. 2024-11-22T06:03:01,893 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=50, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-11-22T06:03:01,893 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'testExportWithResetTtl' from region states. 2024-11-22T06:03:01,893 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportWithResetTtl,,1732255362381.580aaa30dff978e83d0e605a1fe4c9be.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732255381893"}]},"ts":"9223372036854775807"} 2024-11-22T06:03:01,893 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportWithResetTtl,1,1732255362381.915eae74ee43cf48f7325fae2d97b580.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732255381893"}]},"ts":"9223372036854775807"} 2024-11-22T06:03:01,897 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41799-0x101626c19a90002, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-22T06:03:01,897 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35283-0x101626c19a90003, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-22T06:03:01,898 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41799-0x101626c19a90002, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:03:01,898 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35283-0x101626c19a90003, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:03:01,898 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44619-0x101626c19a90001, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-22T06:03:01,898 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44619-0x101626c19a90001, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:03:01,898 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-22T06:03:01,898 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:03:01,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=50 2024-11-22T06:03:01,899 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): 
Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-22T06:03:01,899 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-22T06:03:01,900 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-22T06:03:01,900 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-22T06:03:01,900 INFO [PEWorker-2 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-22T06:03:01,900 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 580aaa30dff978e83d0e605a1fe4c9be, NAME => 'testExportWithResetTtl,,1732255362381.580aaa30dff978e83d0e605a1fe4c9be.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 915eae74ee43cf48f7325fae2d97b580, NAME => 'testExportWithResetTtl,1,1732255362381.915eae74ee43cf48f7325fae2d97b580.', STARTKEY => '1', ENDKEY => ''}] 2024-11-22T06:03:01,901 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'testExportWithResetTtl' as deleted. 2024-11-22T06:03:01,901 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732255381901"}]},"ts":"9223372036854775807"} 2024-11-22T06:03:01,904 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(867): Deleted table testExportWithResetTtl state from META 2024-11-22T06:03:01,906 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=50, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-11-22T06:03:01,908 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=50, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testExportWithResetTtl in 54 msec 2024-11-22T06:03:02,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=50 2024-11-22T06:03:02,010 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testExportWithResetTtl 2024-11-22T06:03:02,010 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testExportWithResetTtl completed 2024-11-22T06:03:02,011 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportWithResetTtl 2024-11-22T06:03:02,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] procedure2.ProcedureExecutor(1139): Stored pid=51, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithResetTtl 2024-11-22T06:03:02,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] 
master.MasterRpcServices(1377): Checking to see if procedure is done pid=51 2024-11-22T06:03:02,016 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732255382016"}]},"ts":"1732255382016"} 2024-11-22T06:03:02,018 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=DISABLING in hbase:meta 2024-11-22T06:03:02,018 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithResetTtl to state=DISABLING 2024-11-22T06:03:02,019 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=52, ppid=51, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithResetTtl}] 2024-11-22T06:03:02,021 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=53, ppid=52, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=1737f7a70c93ee364e2e424e900dc473, UNASSIGN}, {pid=54, ppid=52, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=7651663bc25f23dec0bfbfb869dee282, UNASSIGN}] 2024-11-22T06:03:02,022 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=53, ppid=52, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=1737f7a70c93ee364e2e424e900dc473, UNASSIGN 2024-11-22T06:03:02,022 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=54, ppid=52, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=7651663bc25f23dec0bfbfb869dee282, UNASSIGN 2024-11-22T06:03:02,022 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=54 updating hbase:meta row=7651663bc25f23dec0bfbfb869dee282, regionState=CLOSING, regionLocation=407411ea95db,41799,1732255321718 2024-11-22T06:03:02,022 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=53 updating hbase:meta row=1737f7a70c93ee364e2e424e900dc473, regionState=CLOSING, regionLocation=407411ea95db,44619,1732255321573 2024-11-22T06:03:02,025 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=53, ppid=52, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=1737f7a70c93ee364e2e424e900dc473, UNASSIGN because future has completed 2024-11-22T06:03:02,025 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-22T06:03:02,025 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=55, ppid=53, state=RUNNABLE, hasLock=false; CloseRegionProcedure 1737f7a70c93ee364e2e424e900dc473, server=407411ea95db,44619,1732255321573}] 2024-11-22T06:03:02,026 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=54, ppid=52, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=7651663bc25f23dec0bfbfb869dee282, UNASSIGN because future has completed 2024-11-22T06:03:02,026 
DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-22T06:03:02,026 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=56, ppid=54, state=RUNNABLE, hasLock=false; CloseRegionProcedure 7651663bc25f23dec0bfbfb869dee282, server=407411ea95db,41799,1732255321718}] 2024-11-22T06:03:02,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=51 2024-11-22T06:03:02,178 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] handler.UnassignRegionHandler(122): Close 1737f7a70c93ee364e2e424e900dc473 2024-11-22T06:03:02,178 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-22T06:03:02,178 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] regionserver.HRegion(1722): Closing 1737f7a70c93ee364e2e424e900dc473, disabling compactions & flushes 2024-11-22T06:03:02,178 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,,1732255360660.1737f7a70c93ee364e2e424e900dc473. 2024-11-22T06:03:02,178 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,,1732255360660.1737f7a70c93ee364e2e424e900dc473. 2024-11-22T06:03:02,178 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,,1732255360660.1737f7a70c93ee364e2e424e900dc473. after waiting 0 ms 2024-11-22T06:03:02,178 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,,1732255360660.1737f7a70c93ee364e2e424e900dc473. 2024-11-22T06:03:02,179 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] handler.UnassignRegionHandler(122): Close 7651663bc25f23dec0bfbfb869dee282 2024-11-22T06:03:02,179 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-22T06:03:02,179 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.HRegion(1722): Closing 7651663bc25f23dec0bfbfb869dee282, disabling compactions & flushes 2024-11-22T06:03:02,179 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,1,1732255360660.7651663bc25f23dec0bfbfb869dee282. 2024-11-22T06:03:02,179 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,1,1732255360660.7651663bc25f23dec0bfbfb869dee282. 
2024-11-22T06:03:02,179 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,1,1732255360660.7651663bc25f23dec0bfbfb869dee282. after waiting 0 ms 2024-11-22T06:03:02,179 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,1,1732255360660.7651663bc25f23dec0bfbfb869dee282. 2024-11-22T06:03:02,183 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithResetTtl/1737f7a70c93ee364e2e424e900dc473/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-22T06:03:02,183 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithResetTtl/7651663bc25f23dec0bfbfb869dee282/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-22T06:03:02,184 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-22T06:03:02,184 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-22T06:03:02,184 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,1,1732255360660.7651663bc25f23dec0bfbfb869dee282. 2024-11-22T06:03:02,184 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,,1732255360660.1737f7a70c93ee364e2e424e900dc473. 
2024-11-22T06:03:02,184 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.HRegion(1676): Region close journal for 7651663bc25f23dec0bfbfb869dee282: Waiting for close lock at 1732255382179Running coprocessor pre-close hooks at 1732255382179Disabling compacts and flushes for region at 1732255382179Disabling writes for close at 1732255382179Writing region close event to WAL at 1732255382180 (+1 ms)Running coprocessor post-close hooks at 1732255382184 (+4 ms)Closed at 1732255382184 2024-11-22T06:03:02,184 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] regionserver.HRegion(1676): Region close journal for 1737f7a70c93ee364e2e424e900dc473: Waiting for close lock at 1732255382178Running coprocessor pre-close hooks at 1732255382178Disabling compacts and flushes for region at 1732255382178Disabling writes for close at 1732255382178Writing region close event to WAL at 1732255382179 (+1 ms)Running coprocessor post-close hooks at 1732255382184 (+5 ms)Closed at 1732255382184 2024-11-22T06:03:02,186 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] handler.UnassignRegionHandler(157): Closed 7651663bc25f23dec0bfbfb869dee282 2024-11-22T06:03:02,187 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=54 updating hbase:meta row=7651663bc25f23dec0bfbfb869dee282, regionState=CLOSED 2024-11-22T06:03:02,187 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] handler.UnassignRegionHandler(157): Closed 1737f7a70c93ee364e2e424e900dc473 2024-11-22T06:03:02,189 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=53 updating hbase:meta row=1737f7a70c93ee364e2e424e900dc473, regionState=CLOSED 2024-11-22T06:03:02,191 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=56, ppid=54, state=RUNNABLE, hasLock=false; CloseRegionProcedure 7651663bc25f23dec0bfbfb869dee282, server=407411ea95db,41799,1732255321718 because future has completed 2024-11-22T06:03:02,191 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=55, ppid=53, state=RUNNABLE, hasLock=false; CloseRegionProcedure 1737f7a70c93ee364e2e424e900dc473, server=407411ea95db,44619,1732255321573 because future has completed 2024-11-22T06:03:02,194 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=56, resume processing ppid=54 2024-11-22T06:03:02,195 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=55, resume processing ppid=53 2024-11-22T06:03:02,195 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=55, ppid=53, state=SUCCESS, hasLock=false; CloseRegionProcedure 1737f7a70c93ee364e2e424e900dc473, server=407411ea95db,44619,1732255321573 in 167 msec 2024-11-22T06:03:02,195 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=56, ppid=54, state=SUCCESS, hasLock=false; CloseRegionProcedure 7651663bc25f23dec0bfbfb869dee282, server=407411ea95db,41799,1732255321718 in 166 msec 2024-11-22T06:03:02,195 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=54, ppid=52, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=7651663bc25f23dec0bfbfb869dee282, UNASSIGN in 173 msec 2024-11-22T06:03:02,197 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure 
pid=53, resume processing ppid=52 2024-11-22T06:03:02,197 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=53, ppid=52, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=1737f7a70c93ee364e2e424e900dc473, UNASSIGN in 174 msec 2024-11-22T06:03:02,199 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=52, resume processing ppid=51 2024-11-22T06:03:02,200 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=52, ppid=51, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithResetTtl in 178 msec 2024-11-22T06:03:02,201 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732255382201"}]},"ts":"1732255382201"} 2024-11-22T06:03:02,203 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=DISABLED in hbase:meta 2024-11-22T06:03:02,204 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithResetTtl to state=DISABLED 2024-11-22T06:03:02,206 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=51, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithResetTtl in 194 msec 2024-11-22T06:03:02,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=51 2024-11-22T06:03:02,330 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithResetTtl completed 2024-11-22T06:03:02,331 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportWithResetTtl 2024-11-22T06:03:02,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] procedure2.ProcedureExecutor(1139): Stored pid=57, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-22T06:03:02,332 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=57, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-22T06:03:02,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithResetTtl 2024-11-22T06:03:02,333 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=57, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-22T06:03:02,336 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41799 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithResetTtl 2024-11-22T06:03:02,338 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithResetTtl/1737f7a70c93ee364e2e424e900dc473 2024-11-22T06:03:02,338 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithResetTtl/7651663bc25f23dec0bfbfb869dee282 
2024-11-22T06:03:02,340 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithResetTtl/7651663bc25f23dec0bfbfb869dee282/cf, FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithResetTtl/7651663bc25f23dec0bfbfb869dee282/recovered.edits] 2024-11-22T06:03:02,340 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithResetTtl/1737f7a70c93ee364e2e424e900dc473/cf, FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithResetTtl/1737f7a70c93ee364e2e424e900dc473/recovered.edits] 2024-11-22T06:03:02,344 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithResetTtl/7651663bc25f23dec0bfbfb869dee282/cf/6ca25720b1d44f0aa4b13aeec6a49827 to hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/archive/data/default/testtb-testExportWithResetTtl/7651663bc25f23dec0bfbfb869dee282/cf/6ca25720b1d44f0aa4b13aeec6a49827 2024-11-22T06:03:02,344 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithResetTtl/1737f7a70c93ee364e2e424e900dc473/cf/a2da575b0120422286981e2f55babece to hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/archive/data/default/testtb-testExportWithResetTtl/1737f7a70c93ee364e2e424e900dc473/cf/a2da575b0120422286981e2f55babece 2024-11-22T06:03:02,347 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithResetTtl/7651663bc25f23dec0bfbfb869dee282/recovered.edits/9.seqid to hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/archive/data/default/testtb-testExportWithResetTtl/7651663bc25f23dec0bfbfb869dee282/recovered.edits/9.seqid 2024-11-22T06:03:02,347 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithResetTtl/1737f7a70c93ee364e2e424e900dc473/recovered.edits/9.seqid to hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/archive/data/default/testtb-testExportWithResetTtl/1737f7a70c93ee364e2e424e900dc473/recovered.edits/9.seqid 2024-11-22T06:03:02,348 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithResetTtl/7651663bc25f23dec0bfbfb869dee282 2024-11-22T06:03:02,348 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithResetTtl/1737f7a70c93ee364e2e424e900dc473 2024-11-22T06:03:02,348 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithResetTtl regions 2024-11-22T06:03:02,350 DEBUG [PEWorker-3 
{}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=57, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-22T06:03:02,353 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportWithResetTtl from hbase:meta 2024-11-22T06:03:02,397 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41799-0x101626c19a90002, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-22T06:03:02,397 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-22T06:03:02,397 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35283-0x101626c19a90003, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-22T06:03:02,397 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44619-0x101626c19a90001, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-22T06:03:02,398 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-11-22T06:03:02,398 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-11-22T06:03:02,399 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-11-22T06:03:02,399 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-11-22T06:03:02,400 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportWithResetTtl' descriptor. 2024-11-22T06:03:02,402 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=57, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-22T06:03:02,402 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportWithResetTtl' from region states. 
2024-11-22T06:03:02,402 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl,,1732255360660.1737f7a70c93ee364e2e424e900dc473.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732255382402"}]},"ts":"9223372036854775807"} 2024-11-22T06:03:02,402 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl,1,1732255360660.7651663bc25f23dec0bfbfb869dee282.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732255382402"}]},"ts":"9223372036854775807"} 2024-11-22T06:03:02,406 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-22T06:03:02,406 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35283-0x101626c19a90003, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-22T06:03:02,406 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:03:02,406 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44619-0x101626c19a90001, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-22T06:03:02,406 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35283-0x101626c19a90003, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:03:02,406 INFO [PEWorker-3 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-22T06:03:02,406 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44619-0x101626c19a90001, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:03:02,406 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41799-0x101626c19a90002, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-22T06:03:02,406 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41799-0x101626c19a90002, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:03:02,406 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 1737f7a70c93ee364e2e424e900dc473, NAME => 'testtb-testExportWithResetTtl,,1732255360660.1737f7a70c93ee364e2e424e900dc473.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 7651663bc25f23dec0bfbfb869dee282, NAME => 'testtb-testExportWithResetTtl,1,1732255360660.7651663bc25f23dec0bfbfb869dee282.', STARTKEY => '1', ENDKEY => ''}] 2024-11-22T06:03:02,407 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportWithResetTtl' as deleted. 
2024-11-22T06:03:02,407 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732255382407"}]},"ts":"9223372036854775807"} 2024-11-22T06:03:02,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=57 2024-11-22T06:03:02,409 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithResetTtl state from META 2024-11-22T06:03:02,410 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=57, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-22T06:03:02,411 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=57, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithResetTtl in 79 msec 2024-11-22T06:03:02,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=57 2024-11-22T06:03:02,520 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithResetTtl 2024-11-22T06:03:02,521 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithResetTtl completed 2024-11-22T06:03:02,537 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithResetTtl" type: DISABLED 2024-11-22T06:03:02,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithResetTtl 2024-11-22T06:03:02,542 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb-testExportWithResetTtl" type: DISABLED 2024-11-22T06:03:02,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb-testExportWithResetTtl 2024-11-22T06:03:02,548 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithResetTtl" type: DISABLED 2024-11-22T06:03:02,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithResetTtl 2024-11-22T06:03:02,580 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportWithResetTtl Thread=786 (was 774) Potentially hanging thread: ApplicationMasterLauncher #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44239 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2145612548_22 at /127.0.0.1:57168 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1113730973) connection to localhost/127.0.0.1:34539 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HFileArchiver-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34539 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_469820863_1 at /127.0.0.1:57150 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 115312) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2145612548_22 at /127.0.0.1:37732 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) 
app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-2070 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: LogDeleter #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_469820863_1 at /127.0.0.1:36848 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44381 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1113730973) connection to localhost/127.0.0.1:38401 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2145612548_22 at /127.0.0.1:44174 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=813 (was 805) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=622 (was 535) - SystemLoadAverage LEAK? -, ProcessCount=17 (was 16) - ProcessCount LEAK? 
-, AvailableMemoryMB=4435 (was 5003) 2024-11-22T06:03:02,580 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=786 is superior to 500 2024-11-22T06:03:02,602 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportFileSystemState Thread=786, OpenFileDescriptor=813, MaxFileDescriptor=1048576, SystemLoadAverage=622, ProcessCount=17, AvailableMemoryMB=4435 2024-11-22T06:03:02,602 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=786 is superior to 500 2024-11-22T06:03:02,604 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T06:03:02,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] procedure2.ProcedureExecutor(1139): Stored pid=58, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemState 2024-11-22T06:03:02,608 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=58, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_PRE_OPERATION 2024-11-22T06:03:02,608 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T06:03:02,608 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemState" procId is: 58 2024-11-22T06:03:02,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=58 2024-11-22T06:03:02,610 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=58, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-22T06:03:02,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741953_1129 (size=407) 2024-11-22T06:03:02,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741953_1129 (size=407) 2024-11-22T06:03:02,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741953_1129 (size=407) 2024-11-22T06:03:02,629 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 06320f6bd0745b236e940616916e0fcc, NAME => 'testtb-testExportFileSystemState,,1732255382603.06320f6bd0745b236e940616916e0fcc.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 
'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788 2024-11-22T06:03:02,631 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 831cf77fafd1a6284424d731d8a1fc44, NAME => 'testtb-testExportFileSystemState,1,1732255382603.831cf77fafd1a6284424d731d8a1fc44.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788 2024-11-22T06:03:02,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741955_1131 (size=68) 2024-11-22T06:03:02,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741955_1131 (size=68) 2024-11-22T06:03:02,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741954_1130 (size=68) 2024-11-22T06:03:02,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741954_1130 (size=68) 2024-11-22T06:03:02,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741955_1131 (size=68) 2024-11-22T06:03:02,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741954_1130 (size=68) 2024-11-22T06:03:02,662 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,,1732255382603.06320f6bd0745b236e940616916e0fcc.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T06:03:02,662 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,1,1732255382603.831cf77fafd1a6284424d731d8a1fc44.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T06:03:02,662 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1722): Closing 06320f6bd0745b236e940616916e0fcc, disabling compactions & flushes 2024-11-22T06:03:02,662 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1722): Closing 831cf77fafd1a6284424d731d8a1fc44, disabling compactions & flushes 2024-11-22T06:03:02,662 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,,1732255382603.06320f6bd0745b236e940616916e0fcc. 
2024-11-22T06:03:02,662 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,1,1732255382603.831cf77fafd1a6284424d731d8a1fc44. 2024-11-22T06:03:02,662 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,,1732255382603.06320f6bd0745b236e940616916e0fcc. 2024-11-22T06:03:02,662 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,1,1732255382603.831cf77fafd1a6284424d731d8a1fc44. 2024-11-22T06:03:02,662 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,,1732255382603.06320f6bd0745b236e940616916e0fcc. after waiting 0 ms 2024-11-22T06:03:02,662 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,1,1732255382603.831cf77fafd1a6284424d731d8a1fc44. after waiting 0 ms 2024-11-22T06:03:02,662 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,1,1732255382603.831cf77fafd1a6284424d731d8a1fc44. 2024-11-22T06:03:02,662 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,,1732255382603.06320f6bd0745b236e940616916e0fcc. 2024-11-22T06:03:02,662 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,1,1732255382603.831cf77fafd1a6284424d731d8a1fc44. 2024-11-22T06:03:02,662 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,,1732255382603.06320f6bd0745b236e940616916e0fcc. 
2024-11-22T06:03:02,662 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1676): Region close journal for 06320f6bd0745b236e940616916e0fcc: Waiting for close lock at 1732255382662Disabling compacts and flushes for region at 1732255382662Disabling writes for close at 1732255382662Writing region close event to WAL at 1732255382662Closed at 1732255382662 2024-11-22T06:03:02,662 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1676): Region close journal for 831cf77fafd1a6284424d731d8a1fc44: Waiting for close lock at 1732255382662Disabling compacts and flushes for region at 1732255382662Disabling writes for close at 1732255382662Writing region close event to WAL at 1732255382662Closed at 1732255382662 2024-11-22T06:03:02,664 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=58, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_ADD_TO_META 2024-11-22T06:03:02,664 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemState,,1732255382603.06320f6bd0745b236e940616916e0fcc.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1732255382664"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732255382664"}]},"ts":"1732255382664"} 2024-11-22T06:03:02,664 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemState,1,1732255382603.831cf77fafd1a6284424d731d8a1fc44.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1732255382664"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732255382664"}]},"ts":"1732255382664"} 2024-11-22T06:03:02,667 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
2024-11-22T06:03:02,668 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=58, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-22T06:03:02,669 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732255382669"}]},"ts":"1732255382669"} 2024-11-22T06:03:02,672 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=ENABLING in hbase:meta 2024-11-22T06:03:02,672 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(204): Hosts are {407411ea95db=0} racks are {/default-rack=0} 2024-11-22T06:03:02,674 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-22T06:03:02,674 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-22T06:03:02,674 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-22T06:03:02,674 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-22T06:03:02,674 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-22T06:03:02,674 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-22T06:03:02,674 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-22T06:03:02,674 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-22T06:03:02,674 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-22T06:03:02,674 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-22T06:03:02,675 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=59, ppid=58, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=06320f6bd0745b236e940616916e0fcc, ASSIGN}, {pid=60, ppid=58, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=831cf77fafd1a6284424d731d8a1fc44, ASSIGN}] 2024-11-22T06:03:02,676 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=60, ppid=58, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=831cf77fafd1a6284424d731d8a1fc44, ASSIGN 2024-11-22T06:03:02,677 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=59, ppid=58, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=06320f6bd0745b236e940616916e0fcc, ASSIGN 2024-11-22T06:03:02,677 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=59, ppid=58, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=06320f6bd0745b236e940616916e0fcc, ASSIGN; state=OFFLINE, location=407411ea95db,41799,1732255321718; forceNewPlan=false, retain=false 2024-11-22T06:03:02,677 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=60, ppid=58, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=831cf77fafd1a6284424d731d8a1fc44, ASSIGN; state=OFFLINE, location=407411ea95db,44619,1732255321573; forceNewPlan=false, retain=false 2024-11-22T06:03:02,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=58 2024-11-22T06:03:02,828 INFO [407411ea95db:40429 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-11-22T06:03:02,829 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=60 updating hbase:meta row=831cf77fafd1a6284424d731d8a1fc44, regionState=OPENING, regionLocation=407411ea95db,44619,1732255321573 2024-11-22T06:03:02,829 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=59 updating hbase:meta row=06320f6bd0745b236e940616916e0fcc, regionState=OPENING, regionLocation=407411ea95db,41799,1732255321718 2024-11-22T06:03:02,831 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=59, ppid=58, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=06320f6bd0745b236e940616916e0fcc, ASSIGN because future has completed 2024-11-22T06:03:02,831 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=61, ppid=59, state=RUNNABLE, hasLock=false; OpenRegionProcedure 06320f6bd0745b236e940616916e0fcc, server=407411ea95db,41799,1732255321718}] 2024-11-22T06:03:02,832 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=60, ppid=58, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=831cf77fafd1a6284424d731d8a1fc44, ASSIGN because future has completed 2024-11-22T06:03:02,832 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=62, ppid=60, state=RUNNABLE, hasLock=false; OpenRegionProcedure 831cf77fafd1a6284424d731d8a1fc44, server=407411ea95db,44619,1732255321573}] 2024-11-22T06:03:02,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=58 2024-11-22T06:03:02,988 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemState,,1732255382603.06320f6bd0745b236e940616916e0fcc. 2024-11-22T06:03:02,988 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(7752): Opening region: {ENCODED => 06320f6bd0745b236e940616916e0fcc, NAME => 'testtb-testExportFileSystemState,,1732255382603.06320f6bd0745b236e940616916e0fcc.', STARTKEY => '', ENDKEY => '1'} 2024-11-22T06:03:02,989 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemState,,1732255382603.06320f6bd0745b236e940616916e0fcc. service=AccessControlService 2024-11-22T06:03:02,989 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-22T06:03:02,989 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemState 06320f6bd0745b236e940616916e0fcc 2024-11-22T06:03:02,989 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,,1732255382603.06320f6bd0745b236e940616916e0fcc.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T06:03:02,989 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(7794): checking encryption for 06320f6bd0745b236e940616916e0fcc 2024-11-22T06:03:02,990 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(7797): checking classloading for 06320f6bd0745b236e940616916e0fcc 2024-11-22T06:03:02,991 INFO [StoreOpener-06320f6bd0745b236e940616916e0fcc-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 06320f6bd0745b236e940616916e0fcc 2024-11-22T06:03:02,992 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemState,1,1732255382603.831cf77fafd1a6284424d731d8a1fc44. 2024-11-22T06:03:02,992 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(7752): Opening region: {ENCODED => 831cf77fafd1a6284424d731d8a1fc44, NAME => 'testtb-testExportFileSystemState,1,1732255382603.831cf77fafd1a6284424d731d8a1fc44.', STARTKEY => '1', ENDKEY => ''} 2024-11-22T06:03:02,992 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemState,1,1732255382603.831cf77fafd1a6284424d731d8a1fc44. service=AccessControlService 2024-11-22T06:03:02,993 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-22T06:03:02,993 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemState 831cf77fafd1a6284424d731d8a1fc44 2024-11-22T06:03:02,993 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,1,1732255382603.831cf77fafd1a6284424d731d8a1fc44.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T06:03:02,993 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(7794): checking encryption for 831cf77fafd1a6284424d731d8a1fc44 2024-11-22T06:03:02,993 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(7797): checking classloading for 831cf77fafd1a6284424d731d8a1fc44 2024-11-22T06:03:02,993 INFO [StoreOpener-06320f6bd0745b236e940616916e0fcc-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 06320f6bd0745b236e940616916e0fcc columnFamilyName cf 2024-11-22T06:03:02,994 DEBUG [StoreOpener-06320f6bd0745b236e940616916e0fcc-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T06:03:02,994 INFO [StoreOpener-06320f6bd0745b236e940616916e0fcc-1 {}] regionserver.HStore(327): Store=06320f6bd0745b236e940616916e0fcc/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T06:03:02,994 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(1038): replaying wal for 06320f6bd0745b236e940616916e0fcc 2024-11-22T06:03:02,994 INFO [StoreOpener-831cf77fafd1a6284424d731d8a1fc44-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 831cf77fafd1a6284424d731d8a1fc44 2024-11-22T06:03:02,995 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemState/06320f6bd0745b236e940616916e0fcc 2024-11-22T06:03:02,995 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemState/06320f6bd0745b236e940616916e0fcc 2024-11-22T06:03:02,996 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(1048): stopping wal replay for 06320f6bd0745b236e940616916e0fcc 2024-11-22T06:03:02,996 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(1060): Cleaning up temporary data for 06320f6bd0745b236e940616916e0fcc 2024-11-22T06:03:02,996 INFO [StoreOpener-831cf77fafd1a6284424d731d8a1fc44-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 831cf77fafd1a6284424d731d8a1fc44 columnFamilyName cf 2024-11-22T06:03:02,996 DEBUG [StoreOpener-831cf77fafd1a6284424d731d8a1fc44-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T06:03:02,996 INFO [StoreOpener-831cf77fafd1a6284424d731d8a1fc44-1 {}] regionserver.HStore(327): Store=831cf77fafd1a6284424d731d8a1fc44/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T06:03:02,996 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(1038): replaying wal for 831cf77fafd1a6284424d731d8a1fc44 2024-11-22T06:03:02,997 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemState/831cf77fafd1a6284424d731d8a1fc44 2024-11-22T06:03:02,997 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(1093): writing seq id for 06320f6bd0745b236e940616916e0fcc 2024-11-22T06:03:02,997 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemState/831cf77fafd1a6284424d731d8a1fc44 2024-11-22T06:03:02,998 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(1048): stopping wal replay for 831cf77fafd1a6284424d731d8a1fc44 2024-11-22T06:03:02,998 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(1060): Cleaning up temporary data for 831cf77fafd1a6284424d731d8a1fc44 2024-11-22T06:03:03,000 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemState/06320f6bd0745b236e940616916e0fcc/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T06:03:03,001 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(1114): Opened 06320f6bd0745b236e940616916e0fcc; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73648520, jitterRate=0.09744846820831299}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-22T06:03:03,001 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(1093): writing seq id for 831cf77fafd1a6284424d731d8a1fc44 2024-11-22T06:03:03,001 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 06320f6bd0745b236e940616916e0fcc 2024-11-22T06:03:03,002 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(1006): Region open journal for 06320f6bd0745b236e940616916e0fcc: Running coprocessor pre-open hook at 1732255382990Writing region info on filesystem at 1732255382990Initializing all the Stores at 1732255382990Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732255382990Cleaning up temporary data from old regions at 1732255382996 (+6 ms)Running coprocessor post-open hooks at 1732255383001 (+5 ms)Region opened successfully at 1732255383002 (+1 ms) 2024-11-22T06:03:03,003 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemState,,1732255382603.06320f6bd0745b236e940616916e0fcc., pid=61, masterSystemTime=1732255382985 2024-11-22T06:03:03,004 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemState/831cf77fafd1a6284424d731d8a1fc44/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T06:03:03,005 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(1114): Opened 831cf77fafd1a6284424d731d8a1fc44; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68693578, jitterRate=0.023614078760147095}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-22T06:03:03,005 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 831cf77fafd1a6284424d731d8a1fc44 2024-11-22T06:03:03,005 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(1006): Region open journal for 831cf77fafd1a6284424d731d8a1fc44: Running coprocessor pre-open hook at 1732255382993Writing region info on filesystem at 
1732255382993Initializing all the Stores at 1732255382994 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732255382994Cleaning up temporary data from old regions at 1732255382998 (+4 ms)Running coprocessor post-open hooks at 1732255383005 (+7 ms)Region opened successfully at 1732255383005 2024-11-22T06:03:03,007 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemState,,1732255382603.06320f6bd0745b236e940616916e0fcc. 2024-11-22T06:03:03,007 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemState,,1732255382603.06320f6bd0745b236e940616916e0fcc. 2024-11-22T06:03:03,008 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemState,1,1732255382603.831cf77fafd1a6284424d731d8a1fc44., pid=62, masterSystemTime=1732255382989 2024-11-22T06:03:03,008 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=59 updating hbase:meta row=06320f6bd0745b236e940616916e0fcc, regionState=OPEN, openSeqNum=2, regionLocation=407411ea95db,41799,1732255321718 2024-11-22T06:03:03,010 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemState,1,1732255382603.831cf77fafd1a6284424d731d8a1fc44. 2024-11-22T06:03:03,010 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemState,1,1732255382603.831cf77fafd1a6284424d731d8a1fc44. 
2024-11-22T06:03:03,010 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=60 updating hbase:meta row=831cf77fafd1a6284424d731d8a1fc44, regionState=OPEN, openSeqNum=2, regionLocation=407411ea95db,44619,1732255321573 2024-11-22T06:03:03,010 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=61, ppid=59, state=RUNNABLE, hasLock=false; OpenRegionProcedure 06320f6bd0745b236e940616916e0fcc, server=407411ea95db,41799,1732255321718 because future has completed 2024-11-22T06:03:03,013 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=62, ppid=60, state=RUNNABLE, hasLock=false; OpenRegionProcedure 831cf77fafd1a6284424d731d8a1fc44, server=407411ea95db,44619,1732255321573 because future has completed 2024-11-22T06:03:03,016 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=61, resume processing ppid=59 2024-11-22T06:03:03,016 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=61, ppid=59, state=SUCCESS, hasLock=false; OpenRegionProcedure 06320f6bd0745b236e940616916e0fcc, server=407411ea95db,41799,1732255321718 in 182 msec 2024-11-22T06:03:03,017 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=59, ppid=58, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=06320f6bd0745b236e940616916e0fcc, ASSIGN in 341 msec 2024-11-22T06:03:03,017 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=62, resume processing ppid=60 2024-11-22T06:03:03,017 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=62, ppid=60, state=SUCCESS, hasLock=false; OpenRegionProcedure 831cf77fafd1a6284424d731d8a1fc44, server=407411ea95db,44619,1732255321573 in 183 msec 2024-11-22T06:03:03,020 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=60, resume processing ppid=58 2024-11-22T06:03:03,020 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=60, ppid=58, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=831cf77fafd1a6284424d731d8a1fc44, ASSIGN in 342 msec 2024-11-22T06:03:03,021 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=58, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-22T06:03:03,021 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732255383021"}]},"ts":"1732255383021"} 2024-11-22T06:03:03,024 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=ENABLED in hbase:meta 2024-11-22T06:03:03,025 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=58, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_POST_OPERATION 2024-11-22T06:03:03,025 DEBUG [PEWorker-4 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemState jenkins: RWXCA 2024-11-22T06:03:03,029 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41799 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 
2024-11-22T06:03:03,056 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:03:03,056 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35283-0x101626c19a90003, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:03:03,056 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41799-0x101626c19a90002, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:03:03,056 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44619-0x101626c19a90001, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:03:03,064 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-22T06:03:03,064 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-22T06:03:03,064 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-22T06:03:03,065 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-22T06:03:03,068 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=58, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemState in 462 msec 2024-11-22T06:03:03,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=58 2024-11-22T06:03:03,239 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemState completed 2024-11-22T06:03:03,240 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportFileSystemState get assigned. Timeout = 60000ms 2024-11-22T06:03:03,240 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-22T06:03:03,244 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportFileSystemState assigned to meta. Checking AM states. 2024-11-22T06:03:03,244 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-22T06:03:03,244 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportFileSystemState assigned. 
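The block of entries above records CreateTableProcedure pid=58 end to end: the two new regions 06320f6bd0745b236e940616916e0fcc and 831cf77fafd1a6284424d731d8a1fc44 are initialized, added to hbase:meta, assigned and opened on servers 41799 and 44619, the table is marked ENABLED, the owner ACL (jenkins: RWXCA) is stored and propagated through ZooKeeper, and the test confirms that all regions are assigned. For orientation only, here is a minimal client-side sketch of an equivalent table creation, assuming an already-open Connection named conn and using the public Admin API rather than whatever helper the test harness actually invokes; it is not part of the captured log.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch only: 'conn' is an assumed, already-open Connection to the cluster under test.
final class CreateExportTableSketch {
  static void createPreSplitTable(Connection conn) throws Exception {
    TableName table = TableName.valueOf("testtb-testExportFileSystemState");
    try (Admin admin = conn.getAdmin()) {
      admin.createTable(
          TableDescriptorBuilder.newBuilder(table)
              // single column family 'cf', matching the store opened in the log
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
              .build(),
          // one split key '1', which yields the two regions seen above
          new byte[][] { Bytes.toBytes("1") });
    }
  }
}

The synchronous createTable call returns only once the create-table procedure (the pid=58 work above) has finished, which is why the log shows the client repeatedly asking the master whether pid=58 is done before reporting "Operation: CREATE ... completed".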
2024-11-22T06:03:03,244 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-22T06:03:03,249 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-11-22T06:03:03,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732255383249 (current time:1732255383249). 2024-11-22T06:03:03,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-22T06:03:03,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemState VERSION not specified, setting to 2 2024-11-22T06:03:03,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-22T06:03:03,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@75a2af54, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:03:03,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] client.ClusterIdFetcher(90): Going to request 407411ea95db,40429,-1 for getting cluster id 2024-11-22T06:03:03,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T06:03:03,251 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '6eeb2921-6f20-4de7-bcc0-81ebecd4007b' 2024-11-22T06:03:03,251 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T06:03:03,251 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "6eeb2921-6f20-4de7-bcc0-81ebecd4007b" 2024-11-22T06:03:03,252 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@59fcd657, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:03:03,252 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [407411ea95db,40429,-1] 2024-11-22T06:03:03,252 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T06:03:03,252 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:03:03,253 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): 
Connection from 172.17.0.2:37604, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T06:03:03,254 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5b990ccf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:03:03,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T06:03:03,255 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=407411ea95db,41799,1732255321718, seqNum=-1] 2024-11-22T06:03:03,256 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T06:03:03,257 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47544, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T06:03:03,258 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429. 2024-11-22T06:03:03,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-22T06:03:03,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:03:03,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:03:03,259 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-22T06:03:03,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@49ec215b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:03:03,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] client.ClusterIdFetcher(90): Going to request 407411ea95db,40429,-1 for getting cluster id 2024-11-22T06:03:03,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T06:03:03,260 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '6eeb2921-6f20-4de7-bcc0-81ebecd4007b' 2024-11-22T06:03:03,260 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T06:03:03,261 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "6eeb2921-6f20-4de7-bcc0-81ebecd4007b" 2024-11-22T06:03:03,261 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@720812e8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:03:03,261 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [407411ea95db,40429,-1] 2024-11-22T06:03:03,261 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T06:03:03,261 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:03:03,262 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37612, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T06:03:03,263 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@697bb2b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:03:03,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T06:03:03,264 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=407411ea95db,41799,1732255321718, seqNum=-1] 2024-11-22T06:03:03,265 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T06:03:03,266 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47548, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-22T06:03:03,268 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1732255324607.c2a1af531af5dfa747044be7409c1d27., hostname=407411ea95db,41799,1732255321718, seqNum=2] 2024-11-22T06:03:03,270 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429. 2024-11-22T06:03:03,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-22T06:03:03,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:03:03,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:03:03,270 INFO [Registry-endpoints-refresh-end-points {}] 
client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-22T06:03:03,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-11-22T06:03:03,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-22T06:03:03,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] procedure2.ProcedureExecutor(1139): Stored pid=63, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-11-22T06:03:03,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 63 2024-11-22T06:03:03,273 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=63, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-22T06:03:03,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-11-22T06:03:03,274 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=63, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-22T06:03:03,276 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=63, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-22T06:03:03,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741956_1132 (size=170) 2024-11-22T06:03:03,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741956_1132 (size=170) 2024-11-22T06:03:03,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741956_1132 (size=170) 2024-11-22T06:03:03,284 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=63, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-22T06:03:03,284 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=64, ppid=63, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 06320f6bd0745b236e940616916e0fcc}, {pid=65, ppid=63, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 831cf77fafd1a6284424d731d8a1fc44}] 
2024-11-22T06:03:03,285 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=64, ppid=63, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 06320f6bd0745b236e940616916e0fcc 2024-11-22T06:03:03,285 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=65, ppid=63, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 831cf77fafd1a6284424d731d8a1fc44 2024-11-22T06:03:03,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-11-22T06:03:03,437 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44619 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=65 2024-11-22T06:03:03,437 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41799 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=64 2024-11-22T06:03:03,438 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=64}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,,1732255382603.06320f6bd0745b236e940616916e0fcc. 2024-11-22T06:03:03,438 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=65}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,1,1732255382603.831cf77fafd1a6284424d731d8a1fc44. 2024-11-22T06:03:03,438 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=64}] regionserver.HRegion(2603): Flush status journal for 06320f6bd0745b236e940616916e0fcc: 2024-11-22T06:03:03,438 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=65}] regionserver.HRegion(2603): Flush status journal for 831cf77fafd1a6284424d731d8a1fc44: 2024-11-22T06:03:03,438 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=64}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,,1732255382603.06320f6bd0745b236e940616916e0fcc. for emptySnaptb0-testExportFileSystemState completed. 2024-11-22T06:03:03,438 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=65}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,1,1732255382603.831cf77fafd1a6284424d731d8a1fc44. for emptySnaptb0-testExportFileSystemState completed. 2024-11-22T06:03:03,438 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=64}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,,1732255382603.06320f6bd0745b236e940616916e0fcc.' region-info for snapshot=emptySnaptb0-testExportFileSystemState 2024-11-22T06:03:03,438 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=65}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,1,1732255382603.831cf77fafd1a6284424d731d8a1fc44.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemState 2024-11-22T06:03:03,438 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=64}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-22T06:03:03,438 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=65}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-22T06:03:03,439 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=64}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-22T06:03:03,439 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=65}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-22T06:03:03,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741958_1134 (size=71) 2024-11-22T06:03:03,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741957_1133 (size=71) 2024-11-22T06:03:03,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741957_1133 (size=71) 2024-11-22T06:03:03,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741957_1133 (size=71) 2024-11-22T06:03:03,448 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=65}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,1,1732255382603.831cf77fafd1a6284424d731d8a1fc44. 2024-11-22T06:03:03,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741958_1134 (size=71) 2024-11-22T06:03:03,448 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=65}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=65 2024-11-22T06:03:03,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741958_1134 (size=71) 2024-11-22T06:03:03,448 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=64}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,,1732255382603.06320f6bd0745b236e940616916e0fcc. 
2024-11-22T06:03:03,448 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=64 2024-11-22T06:03:03,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.HMaster(4169): Remote procedure done, pid=65 2024-11-22T06:03:03,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.HMaster(4169): Remote procedure done, pid=64 2024-11-22T06:03:03,449 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemState on region 831cf77fafd1a6284424d731d8a1fc44 2024-11-22T06:03:03,449 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemState on region 06320f6bd0745b236e940616916e0fcc 2024-11-22T06:03:03,449 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=64, ppid=63, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 06320f6bd0745b236e940616916e0fcc 2024-11-22T06:03:03,449 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=65, ppid=63, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 831cf77fafd1a6284424d731d8a1fc44 2024-11-22T06:03:03,451 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=64, ppid=63, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 06320f6bd0745b236e940616916e0fcc in 166 msec 2024-11-22T06:03:03,452 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=65, resume processing ppid=63 2024-11-22T06:03:03,452 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=65, ppid=63, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 831cf77fafd1a6284424d731d8a1fc44 in 166 msec 2024-11-22T06:03:03,452 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=63, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-22T06:03:03,453 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=63, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-22T06:03:03,453 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=63, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-22T06:03:03,454 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemState 2024-11-22T06:03:03,454 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemState 2024-11-22T06:03:03,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:35893 is added to blk_1073741959_1135 (size=552) 2024-11-22T06:03:03,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741959_1135 (size=552) 2024-11-22T06:03:03,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741959_1135 (size=552) 2024-11-22T06:03:03,470 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=63, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-22T06:03:03,475 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=63, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-22T06:03:03,476 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemState to hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/emptySnaptb0-testExportFileSystemState 2024-11-22T06:03:03,478 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=63, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-22T06:03:03,478 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 63 2024-11-22T06:03:03,479 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=63, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } in 207 msec 2024-11-22T06:03:03,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-11-22T06:03:03,590 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemState completed 2024-11-22T06:03:03,597 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemState', row='0982050784be3c4c8fc1b3129213074c5', locateType=CURRENT is [region=testtb-testExportFileSystemState,,1732255382603.06320f6bd0745b236e940616916e0fcc., hostname=407411ea95db,41799,1732255321718, seqNum=2] 2024-11-22T06:03:03,599 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemState', row='1995088fa265e7e09c621d0821c81a2ae', locateType=CURRENT is [region=testtb-testExportFileSystemState,1,1732255382603.831cf77fafd1a6284424d731d8a1fc44., 
hostname=407411ea95db,44619,1732255321573, seqNum=2] 2024-11-22T06:03:03,602 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemState', row='243767f97cc05c5a41df7ed0f4a2b9d94', locateType=CURRENT is [region=testtb-testExportFileSystemState,1,1732255382603.831cf77fafd1a6284424d731d8a1fc44., hostname=407411ea95db,44619,1732255321573, seqNum=2] 2024-11-22T06:03:03,608 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41799 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemState,,1732255382603.06320f6bd0745b236e940616916e0fcc. with WAL disabled. Data may be lost in the event of a crash. 2024-11-22T06:03:03,611 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44619 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemState,1,1732255382603.831cf77fafd1a6284424d731d8a1fc44. with WAL disabled. Data may be lost in the event of a crash. 2024-11-22T06:03:03,614 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-22T06:03:03,618 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemState 2024-11-22T06:03:03,618 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemState,,1732255382603.06320f6bd0745b236e940616916e0fcc. 2024-11-22T06:03:03,618 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-22T06:03:03,621 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-22T06:03:03,632 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-22T06:03:03,647 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-22T06:03:03,651 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-11-22T06:03:03,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732255383651 (current time:1732255383651). 
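The handler entries above show the test writing rows with the WAL disabled (hence the "Data may be lost in the event of a crash" warning) and then submitting a FLUSH-type snapshot request for snaptb0-testExportFileSystemState. A minimal client-side sketch of that sequence, assuming the standard Connection/Admin API; the table and snapshot names are taken from the log, while the row key, column value, and class name are illustrative:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.util.Bytes;

public class SnapshotRequestSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("testtb-testExportFileSystemState");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table t = conn.getTable(table);
         Admin admin = conn.getAdmin()) {
      // Writing with the WAL skipped is what triggers the regionserver's
      // "writing data to region ... with WAL disabled" warning seen above.
      Put put = new Put(Bytes.toBytes("row-0"));  // illustrative row key
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      put.setDurability(Durability.SKIP_WAL);
      t.put(put);

      // A FLUSH-type snapshot, matching "{ ss=snaptb0-... type=FLUSH ttl=0 }".
      admin.snapshot("snaptb0-testExportFileSystemState", table, SnapshotType.FLUSH);
    }
  }
}
```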
2024-11-22T06:03:03,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-22T06:03:03,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemState VERSION not specified, setting to 2 2024-11-22T06:03:03,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-22T06:03:03,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@420875b5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:03:03,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.ClusterIdFetcher(90): Going to request 407411ea95db,40429,-1 for getting cluster id 2024-11-22T06:03:03,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T06:03:03,653 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '6eeb2921-6f20-4de7-bcc0-81ebecd4007b' 2024-11-22T06:03:03,653 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T06:03:03,653 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "6eeb2921-6f20-4de7-bcc0-81ebecd4007b" 2024-11-22T06:03:03,653 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@897fe9d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:03:03,654 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [407411ea95db,40429,-1] 2024-11-22T06:03:03,654 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T06:03:03,654 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:03:03,655 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37624, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T06:03:03,656 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7a5c5050, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:03:03,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T06:03:03,657 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] 
client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=407411ea95db,41799,1732255321718, seqNum=-1] 2024-11-22T06:03:03,658 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T06:03:03,659 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47554, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T06:03:03,660 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429. 2024-11-22T06:03:03,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-22T06:03:03,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:03:03,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:03:03,661 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-22T06:03:03,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@517fc4de, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:03:03,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.ClusterIdFetcher(90): Going to request 407411ea95db,40429,-1 for getting cluster id 2024-11-22T06:03:03,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T06:03:03,663 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '6eeb2921-6f20-4de7-bcc0-81ebecd4007b' 2024-11-22T06:03:03,664 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T06:03:03,664 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "6eeb2921-6f20-4de7-bcc0-81ebecd4007b" 2024-11-22T06:03:03,664 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@778eaaf9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:03:03,664 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [407411ea95db,40429,-1] 2024-11-22T06:03:03,665 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T06:03:03,665 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:03:03,666 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37652, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T06:03:03,667 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7ca6ef13, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:03:03,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T06:03:03,668 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=407411ea95db,41799,1732255321718, seqNum=-1] 2024-11-22T06:03:03,669 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T06:03:03,670 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47568, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-22T06:03:03,672 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1732255324607.c2a1af531af5dfa747044be7409c1d27., hostname=407411ea95db,41799,1732255321718, seqNum=2] 2024-11-22T06:03:03,674 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429. 2024-11-22T06:03:03,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-22T06:03:03,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:03:03,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:03:03,675 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-11-22T06:03:03,676 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-22T06:03:03,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-22T06:03:03,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] procedure2.ProcedureExecutor(1139): Stored pid=66, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-11-22T06:03:03,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 66 2024-11-22T06:03:03,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=66 2024-11-22T06:03:03,680 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-22T06:03:03,681 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-22T06:03:03,685 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-22T06:03:03,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741960_1136 (size=165) 2024-11-22T06:03:03,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741960_1136 (size=165) 2024-11-22T06:03:03,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741960_1136 (size=165) 2024-11-22T06:03:03,707 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-22T06:03:03,707 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=67, ppid=66, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 06320f6bd0745b236e940616916e0fcc}, {pid=68, ppid=66, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 831cf77fafd1a6284424d731d8a1fc44}] 
2024-11-22T06:03:03,708 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=68, ppid=66, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 831cf77fafd1a6284424d731d8a1fc44 2024-11-22T06:03:03,708 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=67, ppid=66, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 06320f6bd0745b236e940616916e0fcc 2024-11-22T06:03:03,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=66 2024-11-22T06:03:03,861 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41799 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=67 2024-11-22T06:03:03,861 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,,1732255382603.06320f6bd0745b236e940616916e0fcc. 2024-11-22T06:03:03,862 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.HRegion(2902): Flushing 06320f6bd0745b236e940616916e0fcc 1/1 column families, dataSize=266 B heapSize=832 B 2024-11-22T06:03:03,863 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44619 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=68 2024-11-22T06:03:03,863 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,1,1732255382603.831cf77fafd1a6284424d731d8a1fc44. 
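While the regionservers begin the per-region snapshot work for pid=67/68, the repeated "Checking to see if procedure is done pid=66" entries are the client polling the master for completion. A hedged sketch of the equivalent submit-and-poll from client code, assuming the SnapshotDescription(String, TableName, SnapshotType) constructor and Admin#snapshotAsync / Admin#isSnapshotFinished from recent client APIs; the polling interval and class name are arbitrary:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public final class SnapshotPollSketch {
  // Assumes an Admin handle obtained elsewhere, e.g. Connection#getAdmin().
  public static void snapshotAndWait(Admin admin) throws Exception {
    SnapshotDescription desc = new SnapshotDescription(
        "snaptb0-testExportFileSystemState",
        TableName.valueOf("testtb-testExportFileSystemState"),
        SnapshotType.FLUSH);
    admin.snapshotAsync(desc);                  // submit; returns once the procedure is stored
    while (!admin.isSnapshotFinished(desc)) {   // mirrors the "is procedure done pid=66" polling
      Thread.sleep(200);                        // arbitrary back-off for this sketch
    }
  }
}
```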
2024-11-22T06:03:03,863 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.HRegion(2902): Flushing 831cf77fafd1a6284424d731d8a1fc44 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-11-22T06:03:03,896 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemState/831cf77fafd1a6284424d731d8a1fc44/.tmp/cf/988e7773dd4e443284fa41ecc7385934 is 71, key is 1a7cf67e9482ae5ecf12d57ccadbcb21/cf:q/1732255383611/Put/seqid=0 2024-11-22T06:03:03,899 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemState/06320f6bd0745b236e940616916e0fcc/.tmp/cf/95cf9ccbf913436680c7a2dc7c39aec3 is 71, key is 08608e0a73f2a0f3e4c4f30d7031192e/cf:q/1732255383608/Put/seqid=0 2024-11-22T06:03:03,948 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemState' 2024-11-22T06:03:03,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741961_1137 (size=8258) 2024-11-22T06:03:03,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741961_1137 (size=8258) 2024-11-22T06:03:03,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741961_1137 (size=8258) 2024-11-22T06:03:03,966 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.00 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemState/831cf77fafd1a6284424d731d8a1fc44/.tmp/cf/988e7773dd4e443284fa41ecc7385934 2024-11-22T06:03:03,977 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemState/831cf77fafd1a6284424d731d8a1fc44/.tmp/cf/988e7773dd4e443284fa41ecc7385934 as hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemState/831cf77fafd1a6284424d731d8a1fc44/cf/988e7773dd4e443284fa41ecc7385934 2024-11-22T06:03:03,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741962_1138 (size=5354) 2024-11-22T06:03:03,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741962_1138 (size=5354) 2024-11-22T06:03:03,985 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=266 B at sequenceid=6 (bloomFilter=true), 
to=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemState/06320f6bd0745b236e940616916e0fcc/.tmp/cf/95cf9ccbf913436680c7a2dc7c39aec3 2024-11-22T06:03:03,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741962_1138 (size=5354) 2024-11-22T06:03:03,987 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemState/831cf77fafd1a6284424d731d8a1fc44/cf/988e7773dd4e443284fa41ecc7385934, entries=46, sequenceid=6, filesize=8.1 K 2024-11-22T06:03:03,989 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.HRegion(3140): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for 831cf77fafd1a6284424d731d8a1fc44 in 125ms, sequenceid=6, compaction requested=false 2024-11-22T06:03:03,989 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.HRegion(2603): Flush status journal for 831cf77fafd1a6284424d731d8a1fc44: 2024-11-22T06:03:03,989 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,1,1732255382603.831cf77fafd1a6284424d731d8a1fc44. for snaptb0-testExportFileSystemState completed. 2024-11-22T06:03:03,989 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,1,1732255382603.831cf77fafd1a6284424d731d8a1fc44.' 
region-info for snapshot=snaptb0-testExportFileSystemState 2024-11-22T06:03:03,989 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-22T06:03:03,989 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemState/831cf77fafd1a6284424d731d8a1fc44/cf/988e7773dd4e443284fa41ecc7385934] hfiles 2024-11-22T06:03:03,989 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemState/831cf77fafd1a6284424d731d8a1fc44/cf/988e7773dd4e443284fa41ecc7385934 for snapshot=snaptb0-testExportFileSystemState 2024-11-22T06:03:03,993 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemState/06320f6bd0745b236e940616916e0fcc/.tmp/cf/95cf9ccbf913436680c7a2dc7c39aec3 as hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemState/06320f6bd0745b236e940616916e0fcc/cf/95cf9ccbf913436680c7a2dc7c39aec3 2024-11-22T06:03:03,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=66 2024-11-22T06:03:04,002 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemState/06320f6bd0745b236e940616916e0fcc/cf/95cf9ccbf913436680c7a2dc7c39aec3, entries=4, sequenceid=6, filesize=5.2 K 2024-11-22T06:03:04,004 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.HRegion(3140): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for 06320f6bd0745b236e940616916e0fcc in 143ms, sequenceid=6, compaction requested=false 2024-11-22T06:03:04,004 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.HRegion(2603): Flush status journal for 06320f6bd0745b236e940616916e0fcc: 2024-11-22T06:03:04,004 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,,1732255382603.06320f6bd0745b236e940616916e0fcc. for snaptb0-testExportFileSystemState completed. 2024-11-22T06:03:04,004 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,,1732255382603.06320f6bd0745b236e940616916e0fcc.' 
region-info for snapshot=snaptb0-testExportFileSystemState 2024-11-22T06:03:04,004 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-22T06:03:04,004 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemState/06320f6bd0745b236e940616916e0fcc/cf/95cf9ccbf913436680c7a2dc7c39aec3] hfiles 2024-11-22T06:03:04,004 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemState/06320f6bd0745b236e940616916e0fcc/cf/95cf9ccbf913436680c7a2dc7c39aec3 for snapshot=snaptb0-testExportFileSystemState 2024-11-22T06:03:04,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741963_1139 (size=110) 2024-11-22T06:03:04,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741963_1139 (size=110) 2024-11-22T06:03:04,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741963_1139 (size=110) 2024-11-22T06:03:04,020 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,1,1732255382603.831cf77fafd1a6284424d731d8a1fc44. 
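The entries above show each region flushing its memstore to a .tmp HFile, committing it under cf/, and then adding snapshot references to the flushed file; the flush is forced because the snapshot is of type FLUSH. Outside of a snapshot, the same flush can be requested directly, a minimal sketch assuming an existing Admin handle:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public final class FlushSketch {
  // Flush all regions of the table so memstore contents land in HFiles,
  // the standalone equivalent of the per-region flushes logged above.
  public static void flushTable(Admin admin) throws Exception {
    admin.flush(TableName.valueOf("testtb-testExportFileSystemState"));
  }
}
```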
2024-11-22T06:03:04,020 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=68 2024-11-22T06:03:04,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.HMaster(4169): Remote procedure done, pid=68 2024-11-22T06:03:04,021 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemState on region 831cf77fafd1a6284424d731d8a1fc44 2024-11-22T06:03:04,021 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=68, ppid=66, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 831cf77fafd1a6284424d731d8a1fc44 2024-11-22T06:03:04,024 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=68, ppid=66, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 831cf77fafd1a6284424d731d8a1fc44 in 315 msec 2024-11-22T06:03:04,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741964_1140 (size=110) 2024-11-22T06:03:04,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741964_1140 (size=110) 2024-11-22T06:03:04,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741964_1140 (size=110) 2024-11-22T06:03:04,042 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,,1732255382603.06320f6bd0745b236e940616916e0fcc. 
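After both region subprocedures report back, the master consolidates and completes snaptb0-testExportFileSystemState, and the entries that follow stage an export of that snapshot to a second HDFS location (destination path, manifest copy, and the dependency jars resolved by TableMapReduceUtil). A hedged sketch of driving such an export programmatically, assuming ExportSnapshot runs as a Hadoop Tool; the destination URI and class name here are illustrative, not the paths from the log:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public final class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Roughly equivalent to the documented CLI form:
    //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
    //     -snapshot snaptb0-testExportFileSystemState -copy-to <target HDFS root>
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportFileSystemState",
        "-copy-to", "hdfs://target-cluster:8020/hbase"   // illustrative destination
    });
    System.exit(rc);
  }
}
```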
2024-11-22T06:03:04,042 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=67 2024-11-22T06:03:04,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.HMaster(4169): Remote procedure done, pid=67 2024-11-22T06:03:04,043 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemState on region 06320f6bd0745b236e940616916e0fcc 2024-11-22T06:03:04,043 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=67, ppid=66, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 06320f6bd0745b236e940616916e0fcc 2024-11-22T06:03:04,045 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=67, resume processing ppid=66 2024-11-22T06:03:04,045 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-22T06:03:04,045 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=67, ppid=66, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 06320f6bd0745b236e940616916e0fcc in 337 msec 2024-11-22T06:03:04,046 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-22T06:03:04,047 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-22T06:03:04,047 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemState 2024-11-22T06:03:04,048 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-11-22T06:03:04,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741965_1141 (size=630) 2024-11-22T06:03:04,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741965_1141 (size=630) 2024-11-22T06:03:04,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741965_1141 (size=630) 2024-11-22T06:03:04,076 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-22T06:03:04,085 INFO [PEWorker-3 {}] 
procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-22T06:03:04,086 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState to hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-11-22T06:03:04,090 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-22T06:03:04,090 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 66 2024-11-22T06:03:04,094 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=66, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } in 414 msec 2024-11-22T06:03:04,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=66 2024-11-22T06:03:04,310 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemState completed 2024-11-22T06:03:04,310 INFO [Time-limited test {}] snapshot.TestExportSnapshot(467): HDFS export destination path: hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/export-test/export-1732255384310 2024-11-22T06:03:04,310 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=hdfs://localhost:33847, tgtDir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/export-test/export-1732255384310, rawTgtDir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/export-test/export-1732255384310, srcFsUri=hdfs://localhost:33847, srcDir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788 2024-11-22T06:03:04,340 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:33847, inputRoot=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788 2024-11-22T06:03:04,340 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-2145612548_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/export-test/export-1732255384310, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/export-test/export-1732255384310/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-11-22T06:03:04,342 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source 
snapshot's expiration status and integrity. 2024-11-22T06:03:04,347 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/snaptb0-testExportFileSystemState to hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/export-test/export-1732255384310/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-11-22T06:03:04,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741966_1142 (size=630) 2024-11-22T06:03:04,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741966_1142 (size=630) 2024-11-22T06:03:04,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741967_1143 (size=165) 2024-11-22T06:03:04,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741967_1143 (size=165) 2024-11-22T06:03:04,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741966_1142 (size=630) 2024-11-22T06:03:04,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741967_1143 (size=165) 2024-11-22T06:03:04,373 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:03:04,373 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:03:04,373 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:03:04,514 DEBUG [master/407411ea95db:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region c2a1af531af5dfa747044be7409c1d27 changed from -1.0 to 0.0, refreshing cache 2024-11-22T06:03:04,522 DEBUG [master/407411ea95db:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 06320f6bd0745b236e940616916e0fcc changed from -1.0 to 0.0, refreshing cache 2024-11-22T06:03:04,523 DEBUG [master/407411ea95db:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 831cf77fafd1a6284424d731d8a1fc44 changed from -1.0 to 0.0, refreshing cache 2024-11-22T06:03:05,587 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/hadoop-7172916184868907019.jar 2024-11-22T06:03:05,588 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:03:05,588 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:03:05,654 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/hadoop-9551470219706837676.jar 2024-11-22T06:03:05,654 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:03:05,655 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:03:05,655 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:03:05,655 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:03:05,656 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:03:05,656 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:03:05,657 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-22T06:03:05,657 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-22T06:03:05,657 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-22T06:03:05,658 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-22T06:03:05,658 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-22T06:03:05,659 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-22T06:03:05,659 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-22T06:03:05,660 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-22T06:03:05,660 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-22T06:03:05,661 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-22T06:03:05,661 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-22T06:03:05,662 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-22T06:03:05,662 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-22T06:03:05,662 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-22T06:03:05,663 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-22T06:03:05,663 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-22T06:03:05,664 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-22T06:03:05,664 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-22T06:03:05,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741968_1144 (size=131440) 2024-11-22T06:03:05,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741968_1144 (size=131440) 2024-11-22T06:03:05,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741968_1144 (size=131440) 2024-11-22T06:03:06,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741969_1145 (size=4188619) 2024-11-22T06:03:06,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741969_1145 (size=4188619) 2024-11-22T06:03:06,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741969_1145 (size=4188619) 2024-11-22T06:03:06,209 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732255328437_0002_000001 (auth:SIMPLE) from 127.0.0.1:51948 2024-11-22T06:03:06,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741970_1146 (size=1323991) 2024-11-22T06:03:06,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741970_1146 (size=1323991) 2024-11-22T06:03:06,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741970_1146 (size=1323991) 2024-11-22T06:03:06,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741971_1147 (size=903736) 2024-11-22T06:03:06,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741971_1147 (size=903736) 2024-11-22T06:03:06,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741971_1147 (size=903736) 2024-11-22T06:03:06,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741972_1148 (size=8360083) 2024-11-22T06:03:06,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to 
blk_1073741972_1148 (size=8360083) 2024-11-22T06:03:06,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741972_1148 (size=8360083) 2024-11-22T06:03:06,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741973_1149 (size=1877034) 2024-11-22T06:03:06,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741973_1149 (size=1877034) 2024-11-22T06:03:06,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741973_1149 (size=1877034) 2024-11-22T06:03:06,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741974_1150 (size=77835) 2024-11-22T06:03:06,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741974_1150 (size=77835) 2024-11-22T06:03:06,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741974_1150 (size=77835) 2024-11-22T06:03:06,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741975_1151 (size=30949) 2024-11-22T06:03:06,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741975_1151 (size=30949) 2024-11-22T06:03:06,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741975_1151 (size=30949) 2024-11-22T06:03:06,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741976_1152 (size=1597347) 2024-11-22T06:03:06,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741976_1152 (size=1597347) 2024-11-22T06:03:06,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741976_1152 (size=1597347) 2024-11-22T06:03:06,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741977_1153 (size=440656) 2024-11-22T06:03:06,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741977_1153 (size=440656) 2024-11-22T06:03:06,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741977_1153 (size=440656) 2024-11-22T06:03:06,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741978_1154 (size=4695811) 2024-11-22T06:03:06,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741978_1154 (size=4695811) 2024-11-22T06:03:06,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741978_1154 (size=4695811) 2024-11-22T06:03:06,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 
is added to blk_1073741979_1155 (size=232957) 2024-11-22T06:03:06,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741979_1155 (size=232957) 2024-11-22T06:03:06,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741979_1155 (size=232957) 2024-11-22T06:03:06,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741980_1156 (size=127628) 2024-11-22T06:03:06,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741980_1156 (size=127628) 2024-11-22T06:03:06,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741980_1156 (size=127628) 2024-11-22T06:03:06,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741981_1157 (size=6424744) 2024-11-22T06:03:06,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741981_1157 (size=6424744) 2024-11-22T06:03:06,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741981_1157 (size=6424744) 2024-11-22T06:03:06,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741982_1158 (size=20406) 2024-11-22T06:03:06,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741982_1158 (size=20406) 2024-11-22T06:03:06,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741982_1158 (size=20406) 2024-11-22T06:03:06,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741983_1159 (size=5175431) 2024-11-22T06:03:06,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741983_1159 (size=5175431) 2024-11-22T06:03:06,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741983_1159 (size=5175431) 2024-11-22T06:03:07,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741984_1160 (size=217634) 2024-11-22T06:03:07,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741984_1160 (size=217634) 2024-11-22T06:03:07,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741984_1160 (size=217634) 2024-11-22T06:03:07,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741985_1161 (size=1832290) 2024-11-22T06:03:07,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741985_1161 (size=1832290) 2024-11-22T06:03:07,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:43209 is added to blk_1073741985_1161 (size=1832290) 2024-11-22T06:03:07,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741986_1162 (size=322274) 2024-11-22T06:03:07,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741986_1162 (size=322274) 2024-11-22T06:03:07,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741986_1162 (size=322274) 2024-11-22T06:03:07,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741987_1163 (size=503880) 2024-11-22T06:03:07,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741987_1163 (size=503880) 2024-11-22T06:03:07,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741987_1163 (size=503880) 2024-11-22T06:03:07,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741988_1164 (size=29229) 2024-11-22T06:03:07,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741988_1164 (size=29229) 2024-11-22T06:03:07,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741988_1164 (size=29229) 2024-11-22T06:03:07,260 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-22T06:03:07,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741989_1165 (size=24096) 2024-11-22T06:03:07,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741989_1165 (size=24096) 2024-11-22T06:03:07,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741989_1165 (size=24096) 2024-11-22T06:03:07,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741990_1166 (size=111872) 2024-11-22T06:03:07,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741990_1166 (size=111872) 2024-11-22T06:03:07,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741990_1166 (size=111872) 2024-11-22T06:03:07,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741991_1167 (size=45609) 2024-11-22T06:03:07,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741991_1167 (size=45609) 2024-11-22T06:03:07,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741991_1167 (size=45609) 2024-11-22T06:03:07,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:43209 is added to blk_1073741992_1168 (size=136454) 2024-11-22T06:03:07,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741992_1168 (size=136454) 2024-11-22T06:03:07,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741992_1168 (size=136454) 2024-11-22T06:03:07,493 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-11-22T06:03:07,500 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'snaptb0-testExportFileSystemState' hfile list 2024-11-22T06:03:07,515 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(754): export split=0 size=13.3 K 2024-11-22T06:03:07,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741993_1169 (size=344) 2024-11-22T06:03:07,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741993_1169 (size=344) 2024-11-22T06:03:07,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741993_1169 (size=344) 2024-11-22T06:03:07,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741994_1170 (size=15) 2024-11-22T06:03:07,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741994_1170 (size=15) 2024-11-22T06:03:07,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741994_1170 (size=15) 2024-11-22T06:03:07,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741995_1171 (size=303738) 2024-11-22T06:03:07,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741995_1171 (size=303738) 2024-11-22T06:03:07,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741995_1171 (size=303738) 2024-11-22T06:03:07,653 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-22T06:03:07,653 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-22T06:03:08,069 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732255328437_0003_000001 (auth:SIMPLE) from 127.0.0.1:51562 2024-11-22T06:03:11,126 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState 2024-11-22T06:03:11,126 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState Metrics about Tables on a single HBase RegionServer 2024-11-22T06:03:11,127 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl 2024-11-22T06:03:11,127 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl 2024-11-22T06:03:11,307 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1754703613/yarn-5946224054/MiniMRCluster_1754703613-localDir-nm-0_2/usercache/jenkins/appcache/application_1732255328437_0002/container_1732255328437_0002_01_000001/launch_container.sh] 2024-11-22T06:03:11,307 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1754703613/yarn-5946224054/MiniMRCluster_1754703613-localDir-nm-0_2/usercache/jenkins/appcache/application_1732255328437_0002/container_1732255328437_0002_01_000001/container_tokens] 2024-11-22T06:03:11,307 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1754703613/yarn-5946224054/MiniMRCluster_1754703613-localDir-nm-0_2/usercache/jenkins/appcache/application_1732255328437_0002/container_1732255328437_0002_01_000001/sysfs] 2024-11-22T06:03:13,872 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732255328437_0003_000001 (auth:SIMPLE) from 127.0.0.1:51828 2024-11-22T06:03:14,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741996_1172 (size=349388) 2024-11-22T06:03:14,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741996_1172 (size=349388) 2024-11-22T06:03:14,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741996_1172 (size=349388) 2024-11-22T06:03:16,094 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732255328437_0003_000001 (auth:SIMPLE) from 127.0.0.1:39316 2024-11-22T06:03:16,630 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-22T06:03:19,369 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741997_1173 (size=8258) 2024-11-22T06:03:19,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741997_1173 (size=8258) 2024-11-22T06:03:19,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741997_1173 (size=8258) 2024-11-22T06:03:19,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741998_1174 (size=5354) 2024-11-22T06:03:19,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741998_1174 (size=5354) 2024-11-22T06:03:19,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741998_1174 (size=5354) 2024-11-22T06:03:19,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741999_1175 (size=17422) 2024-11-22T06:03:19,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741999_1175 (size=17422) 2024-11-22T06:03:19,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741999_1175 (size=17422) 2024-11-22T06:03:19,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742000_1176 (size=465) 2024-11-22T06:03:19,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742000_1176 (size=465) 2024-11-22T06:03:19,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742000_1176 (size=465) 2024-11-22T06:03:19,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742001_1177 (size=17422) 2024-11-22T06:03:19,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742001_1177 (size=17422) 2024-11-22T06:03:19,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742001_1177 (size=17422) 2024-11-22T06:03:19,556 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1754703613/yarn-5946224054/MiniMRCluster_1754703613-localDir-nm-1_1/usercache/jenkins/appcache/application_1732255328437_0003/container_1732255328437_0003_01_000002/launch_container.sh] 2024-11-22T06:03:19,557 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1754703613/yarn-5946224054/MiniMRCluster_1754703613-localDir-nm-1_1/usercache/jenkins/appcache/application_1732255328437_0003/container_1732255328437_0003_01_000002/container_tokens] 2024-11-22T06:03:19,557 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned 
false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1754703613/yarn-5946224054/MiniMRCluster_1754703613-localDir-nm-1_1/usercache/jenkins/appcache/application_1732255328437_0003/container_1732255328437_0003_01_000002/sysfs] 2024-11-22T06:03:19,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742002_1178 (size=349388) 2024-11-22T06:03:19,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742002_1178 (size=349388) 2024-11-22T06:03:19,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742002_1178 (size=349388) 2024-11-22T06:03:19,577 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732255328437_0003_000001 (auth:SIMPLE) from 127.0.0.1:39324 2024-11-22T06:03:20,813 INFO [Time-limited test {}] snapshot.ExportSnapshot(1210): Finalize the Snapshot Export 2024-11-22T06:03:20,814 INFO [Time-limited test {}] snapshot.ExportSnapshot(1221): Verify the exported snapshot's expiration status and integrity. 2024-11-22T06:03:20,821 INFO [Time-limited test {}] snapshot.ExportSnapshot(1227): Export Completed: snaptb0-testExportFileSystemState 2024-11-22T06:03:20,821 INFO [Time-limited test {}] snapshot.TestExportSnapshot(361): Exported snapshot 2024-11-22T06:03:20,822 INFO [Time-limited test {}] snapshot.TestExportSnapshot(372): Verified filesystem state 2024-11-22T06:03:20,822 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-2145612548_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/snaptb0-testExportFileSystemState at hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-11-22T06:03:20,822 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/snaptb0-testExportFileSystemState/.snapshotinfo 2024-11-22T06:03:20,822 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/snaptb0-testExportFileSystemState/data.manifest 2024-11-22T06:03:20,822 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-2145612548_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/export-test/export-1732255384310/.hbase-snapshot/snaptb0-testExportFileSystemState at hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/export-test/export-1732255384310/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-11-22T06:03:20,823 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/export-test/export-1732255384310/.hbase-snapshot/snaptb0-testExportFileSystemState/.snapshotinfo 2024-11-22T06:03:20,823 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): 
hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/export-test/export-1732255384310/.hbase-snapshot/snaptb0-testExportFileSystemState/data.manifest 2024-11-22T06:03:20,829 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemState 2024-11-22T06:03:20,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] procedure2.ProcedureExecutor(1139): Stored pid=69, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemState 2024-11-22T06:03:20,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=69 2024-11-22T06:03:20,832 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732255400832"}]},"ts":"1732255400832"} 2024-11-22T06:03:20,834 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=DISABLING in hbase:meta 2024-11-22T06:03:20,834 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemState to state=DISABLING 2024-11-22T06:03:20,835 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=70, ppid=69, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemState}] 2024-11-22T06:03:20,837 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=71, ppid=70, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=06320f6bd0745b236e940616916e0fcc, UNASSIGN}, {pid=72, ppid=70, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=831cf77fafd1a6284424d731d8a1fc44, UNASSIGN}] 2024-11-22T06:03:20,838 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=72, ppid=70, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=831cf77fafd1a6284424d731d8a1fc44, UNASSIGN 2024-11-22T06:03:20,838 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=71, ppid=70, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=06320f6bd0745b236e940616916e0fcc, UNASSIGN 2024-11-22T06:03:20,838 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=71 updating hbase:meta row=06320f6bd0745b236e940616916e0fcc, regionState=CLOSING, regionLocation=407411ea95db,41799,1732255321718 2024-11-22T06:03:20,838 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=72 updating hbase:meta row=831cf77fafd1a6284424d731d8a1fc44, regionState=CLOSING, regionLocation=407411ea95db,44619,1732255321573 2024-11-22T06:03:20,840 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=71, ppid=70, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=06320f6bd0745b236e940616916e0fcc, UNASSIGN because future has completed 2024-11-22T06:03:20,840 DEBUG [PEWorker-3 {}] 
assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-22T06:03:20,840 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=73, ppid=71, state=RUNNABLE, hasLock=false; CloseRegionProcedure 06320f6bd0745b236e940616916e0fcc, server=407411ea95db,41799,1732255321718}] 2024-11-22T06:03:20,841 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=72, ppid=70, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=831cf77fafd1a6284424d731d8a1fc44, UNASSIGN because future has completed 2024-11-22T06:03:20,841 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-22T06:03:20,841 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=74, ppid=72, state=RUNNABLE, hasLock=false; CloseRegionProcedure 831cf77fafd1a6284424d731d8a1fc44, server=407411ea95db,44619,1732255321573}] 2024-11-22T06:03:20,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=69 2024-11-22T06:03:20,993 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] handler.UnassignRegionHandler(122): Close 06320f6bd0745b236e940616916e0fcc 2024-11-22T06:03:20,993 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-22T06:03:20,993 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] regionserver.HRegion(1722): Closing 06320f6bd0745b236e940616916e0fcc, disabling compactions & flushes 2024-11-22T06:03:20,993 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,,1732255382603.06320f6bd0745b236e940616916e0fcc. 2024-11-22T06:03:20,993 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,,1732255382603.06320f6bd0745b236e940616916e0fcc. 2024-11-22T06:03:20,994 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,,1732255382603.06320f6bd0745b236e940616916e0fcc. after waiting 0 ms 2024-11-22T06:03:20,994 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,,1732255382603.06320f6bd0745b236e940616916e0fcc. 
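The earlier records from snapshot.ExportSnapshot (loading the 'snaptb0-testExportFileSystemState' hfile list, the single 13.3 K split, and the later "Export Completed" message) are produced by the ExportSnapshot MapReduce tool that this test drives. Below is a minimal, hedged sketch of launching that tool programmatically through ToolRunner; the destination URI is a placeholder and is not taken from this log, and the long-option spelling follows the HBase reference guide rather than anything shown here.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

// Sketch only: run the ExportSnapshot tool for the snapshot named in the log.
// Assumes the hbase-mapreduce and Hadoop client jars are on the classpath.
public class ExportSnapshotExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "--snapshot", "snaptb0-testExportFileSystemState",
        "--copy-to", "hdfs://namenode:8020/export-test"  // placeholder destination, not from this log
    });
    System.exit(rc);
  }
}

Invoking the class this way is, under those assumptions, equivalent to running it from the shell via the hbase launcher with the same options.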
2024-11-22T06:03:20,994 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] handler.UnassignRegionHandler(122): Close 831cf77fafd1a6284424d731d8a1fc44 2024-11-22T06:03:20,994 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-22T06:03:20,994 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] regionserver.HRegion(1722): Closing 831cf77fafd1a6284424d731d8a1fc44, disabling compactions & flushes 2024-11-22T06:03:20,994 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,1,1732255382603.831cf77fafd1a6284424d731d8a1fc44. 2024-11-22T06:03:20,994 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,1,1732255382603.831cf77fafd1a6284424d731d8a1fc44. 2024-11-22T06:03:20,994 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,1,1732255382603.831cf77fafd1a6284424d731d8a1fc44. after waiting 0 ms 2024-11-22T06:03:20,994 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,1,1732255382603.831cf77fafd1a6284424d731d8a1fc44. 2024-11-22T06:03:20,998 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemState/06320f6bd0745b236e940616916e0fcc/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-22T06:03:20,998 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemState/831cf77fafd1a6284424d731d8a1fc44/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-22T06:03:20,999 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-22T06:03:20,999 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-22T06:03:20,999 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,1,1732255382603.831cf77fafd1a6284424d731d8a1fc44. 2024-11-22T06:03:20,999 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,,1732255382603.06320f6bd0745b236e940616916e0fcc. 
2024-11-22T06:03:20,999 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] regionserver.HRegion(1676): Region close journal for 06320f6bd0745b236e940616916e0fcc: Waiting for close lock at 1732255400993Running coprocessor pre-close hooks at 1732255400993Disabling compacts and flushes for region at 1732255400993Disabling writes for close at 1732255400994 (+1 ms)Writing region close event to WAL at 1732255400994Running coprocessor post-close hooks at 1732255400999 (+5 ms)Closed at 1732255400999 2024-11-22T06:03:20,999 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] regionserver.HRegion(1676): Region close journal for 831cf77fafd1a6284424d731d8a1fc44: Waiting for close lock at 1732255400994Running coprocessor pre-close hooks at 1732255400994Disabling compacts and flushes for region at 1732255400994Disabling writes for close at 1732255400994Writing region close event to WAL at 1732255400995 (+1 ms)Running coprocessor post-close hooks at 1732255400999 (+4 ms)Closed at 1732255400999 2024-11-22T06:03:21,001 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] handler.UnassignRegionHandler(157): Closed 831cf77fafd1a6284424d731d8a1fc44 2024-11-22T06:03:21,003 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=72 updating hbase:meta row=831cf77fafd1a6284424d731d8a1fc44, regionState=CLOSED 2024-11-22T06:03:21,003 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] handler.UnassignRegionHandler(157): Closed 06320f6bd0745b236e940616916e0fcc 2024-11-22T06:03:21,004 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=71 updating hbase:meta row=06320f6bd0745b236e940616916e0fcc, regionState=CLOSED 2024-11-22T06:03:21,006 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=74, ppid=72, state=RUNNABLE, hasLock=false; CloseRegionProcedure 831cf77fafd1a6284424d731d8a1fc44, server=407411ea95db,44619,1732255321573 because future has completed 2024-11-22T06:03:21,006 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=73, ppid=71, state=RUNNABLE, hasLock=false; CloseRegionProcedure 06320f6bd0745b236e940616916e0fcc, server=407411ea95db,41799,1732255321718 because future has completed 2024-11-22T06:03:21,012 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=74, resume processing ppid=72 2024-11-22T06:03:21,012 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=73, resume processing ppid=71 2024-11-22T06:03:21,012 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=74, ppid=72, state=SUCCESS, hasLock=false; CloseRegionProcedure 831cf77fafd1a6284424d731d8a1fc44, server=407411ea95db,44619,1732255321573 in 166 msec 2024-11-22T06:03:21,012 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=73, ppid=71, state=SUCCESS, hasLock=false; CloseRegionProcedure 06320f6bd0745b236e940616916e0fcc, server=407411ea95db,41799,1732255321718 in 167 msec 2024-11-22T06:03:21,013 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=72, ppid=70, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=831cf77fafd1a6284424d731d8a1fc44, UNASSIGN in 175 msec 2024-11-22T06:03:21,014 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished 
subprocedure pid=71, resume processing ppid=70 2024-11-22T06:03:21,015 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=71, ppid=70, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=06320f6bd0745b236e940616916e0fcc, UNASSIGN in 175 msec 2024-11-22T06:03:21,016 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=70, resume processing ppid=69 2024-11-22T06:03:21,016 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=70, ppid=69, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemState in 180 msec 2024-11-22T06:03:21,018 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732255401018"}]},"ts":"1732255401018"} 2024-11-22T06:03:21,020 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=DISABLED in hbase:meta 2024-11-22T06:03:21,020 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemState to state=DISABLED 2024-11-22T06:03:21,023 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=69, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemState in 192 msec 2024-11-22T06:03:21,126 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState 2024-11-22T06:03:21,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=69 2024-11-22T06:03:21,150 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemState completed 2024-11-22T06:03:21,151 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemState 2024-11-22T06:03:21,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] procedure2.ProcedureExecutor(1139): Stored pid=75, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-22T06:03:21,153 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=75, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-22T06:03:21,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemState 2024-11-22T06:03:21,154 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=75, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-22T06:03:21,156 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41799 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemState 2024-11-22T06:03:21,158 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(131): ARCHIVING 
hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemState/06320f6bd0745b236e940616916e0fcc 2024-11-22T06:03:21,158 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemState/831cf77fafd1a6284424d731d8a1fc44 2024-11-22T06:03:21,160 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemState/831cf77fafd1a6284424d731d8a1fc44/cf, FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemState/831cf77fafd1a6284424d731d8a1fc44/recovered.edits] 2024-11-22T06:03:21,160 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemState/06320f6bd0745b236e940616916e0fcc/cf, FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemState/06320f6bd0745b236e940616916e0fcc/recovered.edits] 2024-11-22T06:03:21,164 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemState/831cf77fafd1a6284424d731d8a1fc44/cf/988e7773dd4e443284fa41ecc7385934 to hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/archive/data/default/testtb-testExportFileSystemState/831cf77fafd1a6284424d731d8a1fc44/cf/988e7773dd4e443284fa41ecc7385934 2024-11-22T06:03:21,164 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemState/06320f6bd0745b236e940616916e0fcc/cf/95cf9ccbf913436680c7a2dc7c39aec3 to hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/archive/data/default/testtb-testExportFileSystemState/06320f6bd0745b236e940616916e0fcc/cf/95cf9ccbf913436680c7a2dc7c39aec3 2024-11-22T06:03:21,168 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemState/831cf77fafd1a6284424d731d8a1fc44/recovered.edits/9.seqid to hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/archive/data/default/testtb-testExportFileSystemState/831cf77fafd1a6284424d731d8a1fc44/recovered.edits/9.seqid 2024-11-22T06:03:21,168 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemState/06320f6bd0745b236e940616916e0fcc/recovered.edits/9.seqid to hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/archive/data/default/testtb-testExportFileSystemState/06320f6bd0745b236e940616916e0fcc/recovered.edits/9.seqid 2024-11-22T06:03:21,169 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(610): Deleted 
hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemState/831cf77fafd1a6284424d731d8a1fc44 2024-11-22T06:03:21,169 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemState/06320f6bd0745b236e940616916e0fcc 2024-11-22T06:03:21,169 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemState regions 2024-11-22T06:03:21,172 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=75, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-22T06:03:21,176 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemState from hbase:meta 2024-11-22T06:03:21,235 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35283-0x101626c19a90003, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-22T06:03:21,235 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-22T06:03:21,235 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41799-0x101626c19a90002, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-22T06:03:21,235 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44619-0x101626c19a90001, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-22T06:03:21,236 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-11-22T06:03:21,236 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-11-22T06:03:21,236 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-11-22T06:03:21,236 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-11-22T06:03:21,237 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemState' descriptor. 2024-11-22T06:03:21,239 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=75, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-22T06:03:21,239 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemState' from region states. 
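The records above and just below show the test tearing its fixture down: a DisableTableProcedure (pid=69), a DeleteTableProcedure (pid=75) that archives the region directories via HFileArchiver, and finally deletion of the 'emptySnaptb0-testExportFileSystemState' and 'snaptb0-testExportFileSystemState' snapshots. A minimal client-side sketch of that cleanup sequence follows, assuming an hbase-site.xml for the target cluster is on the classpath; the connection setup is an assumption, not something shown in the log.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Sketch only: disable and delete the test table, then drop both snapshots.
public class SnapshotTestCleanup {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("testtb-testExportFileSystemState");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      if (admin.tableExists(table)) {
        if (admin.isTableEnabled(table)) {
          admin.disableTable(table);   // master runs a DisableTableProcedure (pid=69 in the log)
        }
        admin.deleteTable(table);      // master runs a DeleteTableProcedure (pid=75 in the log)
      }
      admin.deleteSnapshot("emptySnaptb0-testExportFileSystemState");
      admin.deleteSnapshot("snaptb0-testExportFileSystemState");
    }
  }
}

Both disableTable and deleteTable block until the corresponding master procedure finishes, which matches the repeated "Checking to see if procedure is done" polling visible in the log.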
2024-11-22T06:03:21,239 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState,,1732255382603.06320f6bd0745b236e940616916e0fcc.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732255401239"}]},"ts":"9223372036854775807"} 2024-11-22T06:03:21,240 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState,1,1732255382603.831cf77fafd1a6284424d731d8a1fc44.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732255401239"}]},"ts":"9223372036854775807"} 2024-11-22T06:03:21,243 INFO [PEWorker-1 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-22T06:03:21,243 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 06320f6bd0745b236e940616916e0fcc, NAME => 'testtb-testExportFileSystemState,,1732255382603.06320f6bd0745b236e940616916e0fcc.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 831cf77fafd1a6284424d731d8a1fc44, NAME => 'testtb-testExportFileSystemState,1,1732255382603.831cf77fafd1a6284424d731d8a1fc44.', STARTKEY => '1', ENDKEY => ''}] 2024-11-22T06:03:21,244 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemState' as deleted. 2024-11-22T06:03:21,244 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732255401244"}]},"ts":"9223372036854775807"} 2024-11-22T06:03:21,246 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemState state from META 2024-11-22T06:03:21,247 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=75, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-22T06:03:21,249 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=75, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemState in 96 msec 2024-11-22T06:03:21,251 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35283-0x101626c19a90003, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-22T06:03:21,251 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44619-0x101626c19a90001, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-22T06:03:21,251 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35283-0x101626c19a90003, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:03:21,252 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44619-0x101626c19a90001, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:03:21,251 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41799-0x101626c19a90002, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-22T06:03:21,252 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): regionserver:41799-0x101626c19a90002, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:03:21,252 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-22T06:03:21,252 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:03:21,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=75 2024-11-22T06:03:21,253 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemState 2024-11-22T06:03:21,253 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemState completed 2024-11-22T06:03:21,263 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemState" type: DISABLED 2024-11-22T06:03:21,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemState 2024-11-22T06:03:21,268 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemState" type: DISABLED 2024-11-22T06:03:21,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemState 2024-11-22T06:03:21,292 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportFileSystemState Thread=788 (was 786) Potentially hanging thread: HFileArchiver-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2145612548_22 at /127.0.0.1:42262 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Potentially hanging thread: process reaper (pid 118619) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2145612548_22 at /127.0.0.1:55460 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1113730973) connection to localhost/127.0.0.1:41825 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2145612548_22 at /127.0.0.1:47490 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1438115375_1 at 
/127.0.0.1:47484 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-2780 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41825 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1438115375_1 at /127.0.0.1:42234 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=801 (was 813), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=641 (was 622) - SystemLoadAverage LEAK? 
-, ProcessCount=16 (was 17), AvailableMemoryMB=4216 (was 4435) 2024-11-22T06:03:21,292 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=788 is superior to 500 2024-11-22T06:03:21,310 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testConsecutiveExports Thread=788, OpenFileDescriptor=801, MaxFileDescriptor=1048576, SystemLoadAverage=641, ProcessCount=16, AvailableMemoryMB=4211 2024-11-22T06:03:21,310 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=788 is superior to 500 2024-11-22T06:03:21,312 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T06:03:21,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] procedure2.ProcedureExecutor(1139): Stored pid=76, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testConsecutiveExports 2024-11-22T06:03:21,314 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=76, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_PRE_OPERATION 2024-11-22T06:03:21,314 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T06:03:21,314 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testConsecutiveExports" procId is: 76 2024-11-22T06:03:21,315 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=76, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-22T06:03:21,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=76 2024-11-22T06:03:21,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742003_1179 (size=404) 2024-11-22T06:03:21,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742003_1179 (size=404) 2024-11-22T06:03:21,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742003_1179 (size=404) 2024-11-22T06:03:21,351 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 8d0b98c5d9fb22de88ee6daabe26e0bb, NAME => 'testtb-testConsecutiveExports,,1732255401311.8d0b98c5d9fb22de88ee6daabe26e0bb.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 
'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788 2024-11-22T06:03:21,352 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 6789b821e00450848828fd44c6971ecb, NAME => 'testtb-testConsecutiveExports,1,1732255401311.6789b821e00450848828fd44c6971ecb.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788 2024-11-22T06:03:21,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742005_1181 (size=65) 2024-11-22T06:03:21,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742005_1181 (size=65) 2024-11-22T06:03:21,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742005_1181 (size=65) 2024-11-22T06:03:21,371 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,1,1732255401311.6789b821e00450848828fd44c6971ecb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T06:03:21,371 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1722): Closing 6789b821e00450848828fd44c6971ecb, disabling compactions & flushes 2024-11-22T06:03:21,371 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,1,1732255401311.6789b821e00450848828fd44c6971ecb. 2024-11-22T06:03:21,371 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,1,1732255401311.6789b821e00450848828fd44c6971ecb. 2024-11-22T06:03:21,371 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,1,1732255401311.6789b821e00450848828fd44c6971ecb. after waiting 0 ms 2024-11-22T06:03:21,371 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,1,1732255401311.6789b821e00450848828fd44c6971ecb. 2024-11-22T06:03:21,371 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,1,1732255401311.6789b821e00450848828fd44c6971ecb. 
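
The create request above carries the full table descriptor (a single column family 'cf' with VERSIONS => '1', REGION_REPLICATION => '1') and the table is pre-split at key '1', which is why two regions (startkey '' and startkey '1') are initialized. A minimal client-side sketch that would produce an equivalent table, assuming a plain ConnectionFactory connection rather than the test harness's own utilities:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    // Illustrative sketch only: an equivalent create-table call for the descriptor
    // logged above. Connection setup is an assumption; this is not the test's code.
    public class CreateConsecutiveExportsTable {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableDescriptor desc = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("testtb-testConsecutiveExports"))
              .setRegionReplication(1)                       // REGION_REPLICATION => '1'
              .setColumnFamily(ColumnFamilyDescriptorBuilder
                  .newBuilder(Bytes.toBytes("cf"))
                  .setMaxVersions(1)                         // VERSIONS => '1'
                  .build())
              .build();
          // One split key '1' yields the two regions seen in the log: ['', '1') and ['1', '').
          admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
        }
      }
    }

The CreateTableProcedure then walks the states visible in the surrounding log lines (PRE_OPERATION, WRITE_FS_LAYOUT, ADD_TO_META, ASSIGN_REGIONS, UPDATE_DESC_CACHE, POST_OPERATION) while the client keeps polling "Checking to see if procedure is done pid=76".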
2024-11-22T06:03:21,371 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1676): Region close journal for 6789b821e00450848828fd44c6971ecb: Waiting for close lock at 1732255401371Disabling compacts and flushes for region at 1732255401371Disabling writes for close at 1732255401371Writing region close event to WAL at 1732255401371Closed at 1732255401371 2024-11-22T06:03:21,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742004_1180 (size=65) 2024-11-22T06:03:21,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742004_1180 (size=65) 2024-11-22T06:03:21,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742004_1180 (size=65) 2024-11-22T06:03:21,375 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,,1732255401311.8d0b98c5d9fb22de88ee6daabe26e0bb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T06:03:21,375 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1722): Closing 8d0b98c5d9fb22de88ee6daabe26e0bb, disabling compactions & flushes 2024-11-22T06:03:21,375 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,,1732255401311.8d0b98c5d9fb22de88ee6daabe26e0bb. 2024-11-22T06:03:21,375 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,,1732255401311.8d0b98c5d9fb22de88ee6daabe26e0bb. 2024-11-22T06:03:21,375 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,,1732255401311.8d0b98c5d9fb22de88ee6daabe26e0bb. after waiting 0 ms 2024-11-22T06:03:21,375 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,,1732255401311.8d0b98c5d9fb22de88ee6daabe26e0bb. 2024-11-22T06:03:21,375 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,,1732255401311.8d0b98c5d9fb22de88ee6daabe26e0bb. 
2024-11-22T06:03:21,375 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1676): Region close journal for 8d0b98c5d9fb22de88ee6daabe26e0bb: Waiting for close lock at 1732255401375Disabling compacts and flushes for region at 1732255401375Disabling writes for close at 1732255401375Writing region close event to WAL at 1732255401375Closed at 1732255401375 2024-11-22T06:03:21,377 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=76, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_ADD_TO_META 2024-11-22T06:03:21,377 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testConsecutiveExports,1,1732255401311.6789b821e00450848828fd44c6971ecb.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1732255401377"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732255401377"}]},"ts":"1732255401377"} 2024-11-22T06:03:21,377 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testConsecutiveExports,,1732255401311.8d0b98c5d9fb22de88ee6daabe26e0bb.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1732255401377"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732255401377"}]},"ts":"1732255401377"} 2024-11-22T06:03:21,380 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-22T06:03:21,382 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=76, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-22T06:03:21,382 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732255401382"}]},"ts":"1732255401382"} 2024-11-22T06:03:21,384 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=ENABLING in hbase:meta 2024-11-22T06:03:21,384 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(204): Hosts are {407411ea95db=0} racks are {/default-rack=0} 2024-11-22T06:03:21,386 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-22T06:03:21,386 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-22T06:03:21,386 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-22T06:03:21,386 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-22T06:03:21,386 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-22T06:03:21,386 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-22T06:03:21,386 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-22T06:03:21,386 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-22T06:03:21,386 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-22T06:03:21,386 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-22T06:03:21,387 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=77, ppid=76, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=8d0b98c5d9fb22de88ee6daabe26e0bb, ASSIGN}, {pid=78, ppid=76, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=6789b821e00450848828fd44c6971ecb, ASSIGN}] 2024-11-22T06:03:21,388 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=78, ppid=76, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=6789b821e00450848828fd44c6971ecb, ASSIGN 2024-11-22T06:03:21,388 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=77, ppid=76, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=8d0b98c5d9fb22de88ee6daabe26e0bb, ASSIGN 2024-11-22T06:03:21,389 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=78, ppid=76, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=6789b821e00450848828fd44c6971ecb, ASSIGN; state=OFFLINE, location=407411ea95db,44619,1732255321573; forceNewPlan=false, retain=false 2024-11-22T06:03:21,389 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=77, ppid=76, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=8d0b98c5d9fb22de88ee6daabe26e0bb, ASSIGN; state=OFFLINE, location=407411ea95db,35283,1732255321784; forceNewPlan=false, retain=false 2024-11-22T06:03:21,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=76 2024-11-22T06:03:21,539 INFO [407411ea95db:40429 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
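
The BalancerClusterState lines above enumerate one host and one rack with three servers before the two ASSIGN subprocedures (pid=77, pid=78) pick their targets. Purely as a toy illustration of round-robin placement, and not the actual BaseLoadBalancer or TransitRegionStateProcedure logic, spreading regions over servers can be sketched as:

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    // Toy round-robin placement: region i goes to server (i mod serverCount).
    // Server/region names are taken from the log for flavour only; this is not HBase code.
    public class RoundRobinSketch {
      static Map<String, List<String>> assign(List<String> regions, List<String> servers) {
        Map<String, List<String>> plan = new HashMap<>();
        for (String s : servers) {
          plan.put(s, new ArrayList<>());
        }
        for (int i = 0; i < regions.size(); i++) {
          plan.get(servers.get(i % servers.size())).add(regions.get(i));
        }
        return plan;
      }

      public static void main(String[] args) {
        System.out.println(assign(
            List.of("8d0b98c5d9fb22de88ee6daabe26e0bb", "6789b821e00450848828fd44c6971ecb"),
            List.of("407411ea95db,35283", "407411ea95db,44619", "407411ea95db,41799")));
      }
    }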
2024-11-22T06:03:21,540 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=77 updating hbase:meta row=8d0b98c5d9fb22de88ee6daabe26e0bb, regionState=OPENING, regionLocation=407411ea95db,35283,1732255321784 2024-11-22T06:03:21,540 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=78 updating hbase:meta row=6789b821e00450848828fd44c6971ecb, regionState=OPENING, regionLocation=407411ea95db,44619,1732255321573 2024-11-22T06:03:21,543 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=77, ppid=76, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=8d0b98c5d9fb22de88ee6daabe26e0bb, ASSIGN because future has completed 2024-11-22T06:03:21,543 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=79, ppid=77, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8d0b98c5d9fb22de88ee6daabe26e0bb, server=407411ea95db,35283,1732255321784}] 2024-11-22T06:03:21,544 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=78, ppid=76, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=6789b821e00450848828fd44c6971ecb, ASSIGN because future has completed 2024-11-22T06:03:21,545 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=80, ppid=78, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6789b821e00450848828fd44c6971ecb, server=407411ea95db,44619,1732255321573}] 2024-11-22T06:03:21,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=76 2024-11-22T06:03:21,699 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] handler.AssignRegionHandler(132): Open testtb-testConsecutiveExports,,1732255401311.8d0b98c5d9fb22de88ee6daabe26e0bb. 2024-11-22T06:03:21,700 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(7752): Opening region: {ENCODED => 8d0b98c5d9fb22de88ee6daabe26e0bb, NAME => 'testtb-testConsecutiveExports,,1732255401311.8d0b98c5d9fb22de88ee6daabe26e0bb.', STARTKEY => '', ENDKEY => '1'} 2024-11-22T06:03:21,700 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testConsecutiveExports,,1732255401311.8d0b98c5d9fb22de88ee6daabe26e0bb. service=AccessControlService 2024-11-22T06:03:21,700 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-22T06:03:21,700 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testConsecutiveExports 8d0b98c5d9fb22de88ee6daabe26e0bb 2024-11-22T06:03:21,701 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,,1732255401311.8d0b98c5d9fb22de88ee6daabe26e0bb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T06:03:21,701 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(7794): checking encryption for 8d0b98c5d9fb22de88ee6daabe26e0bb 2024-11-22T06:03:21,701 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(7797): checking classloading for 8d0b98c5d9fb22de88ee6daabe26e0bb 2024-11-22T06:03:21,703 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] handler.AssignRegionHandler(132): Open testtb-testConsecutiveExports,1,1732255401311.6789b821e00450848828fd44c6971ecb. 2024-11-22T06:03:21,703 INFO [StoreOpener-8d0b98c5d9fb22de88ee6daabe26e0bb-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 8d0b98c5d9fb22de88ee6daabe26e0bb 2024-11-22T06:03:21,703 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(7752): Opening region: {ENCODED => 6789b821e00450848828fd44c6971ecb, NAME => 'testtb-testConsecutiveExports,1,1732255401311.6789b821e00450848828fd44c6971ecb.', STARTKEY => '1', ENDKEY => ''} 2024-11-22T06:03:21,703 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testConsecutiveExports,1,1732255401311.6789b821e00450848828fd44c6971ecb. service=AccessControlService 2024-11-22T06:03:21,703 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
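
Both region opens register the AccessControlService coprocessor, which is expected for a secure export-snapshot test. A hypothetical configuration sketch that loads AccessController as a system coprocessor in the way such a secured mini-cluster is typically set up (the test's real bootstrap code may differ):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Hypothetical sketch: the usual way to obtain the AccessControlService seen in
    // the log is to register AccessController on master, regions and regionservers.
    public class SecureCoprocessorConfig {
      public static Configuration build() {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.coprocessor.master.classes",
            "org.apache.hadoop.hbase.security.access.AccessController");
        conf.set("hbase.coprocessor.region.classes",
            "org.apache.hadoop.hbase.security.access.AccessController");
        conf.set("hbase.coprocessor.regionserver.classes",
            "org.apache.hadoop.hbase.security.access.AccessController");
        conf.setBoolean("hbase.security.authorization", true);
        return conf;
      }
    }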
2024-11-22T06:03:21,704 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testConsecutiveExports 6789b821e00450848828fd44c6971ecb 2024-11-22T06:03:21,704 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,1,1732255401311.6789b821e00450848828fd44c6971ecb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T06:03:21,704 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(7794): checking encryption for 6789b821e00450848828fd44c6971ecb 2024-11-22T06:03:21,704 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(7797): checking classloading for 6789b821e00450848828fd44c6971ecb 2024-11-22T06:03:21,704 INFO [StoreOpener-8d0b98c5d9fb22de88ee6daabe26e0bb-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8d0b98c5d9fb22de88ee6daabe26e0bb columnFamilyName cf 2024-11-22T06:03:21,704 DEBUG [StoreOpener-8d0b98c5d9fb22de88ee6daabe26e0bb-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T06:03:21,705 INFO [StoreOpener-8d0b98c5d9fb22de88ee6daabe26e0bb-1 {}] regionserver.HStore(327): Store=8d0b98c5d9fb22de88ee6daabe26e0bb/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T06:03:21,705 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(1038): replaying wal for 8d0b98c5d9fb22de88ee6daabe26e0bb 2024-11-22T06:03:21,705 INFO [StoreOpener-6789b821e00450848828fd44c6971ecb-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 6789b821e00450848828fd44c6971ecb 2024-11-22T06:03:21,706 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testConsecutiveExports/8d0b98c5d9fb22de88ee6daabe26e0bb 2024-11-22T06:03:21,706 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testConsecutiveExports/8d0b98c5d9fb22de88ee6daabe26e0bb 2024-11-22T06:03:21,706 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(1048): stopping wal replay for 8d0b98c5d9fb22de88ee6daabe26e0bb 2024-11-22T06:03:21,706 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(1060): Cleaning up temporary data for 8d0b98c5d9fb22de88ee6daabe26e0bb 2024-11-22T06:03:21,706 INFO [StoreOpener-6789b821e00450848828fd44c6971ecb-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6789b821e00450848828fd44c6971ecb columnFamilyName cf 2024-11-22T06:03:21,707 DEBUG [StoreOpener-6789b821e00450848828fd44c6971ecb-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T06:03:21,707 INFO [StoreOpener-6789b821e00450848828fd44c6971ecb-1 {}] regionserver.HStore(327): Store=6789b821e00450848828fd44c6971ecb/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T06:03:21,707 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(1038): replaying wal for 6789b821e00450848828fd44c6971ecb 2024-11-22T06:03:21,708 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testConsecutiveExports/6789b821e00450848828fd44c6971ecb 2024-11-22T06:03:21,708 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(1093): writing seq id for 8d0b98c5d9fb22de88ee6daabe26e0bb 2024-11-22T06:03:21,708 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testConsecutiveExports/6789b821e00450848828fd44c6971ecb 2024-11-22T06:03:21,708 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(1048): stopping wal replay for 6789b821e00450848828fd44c6971ecb 2024-11-22T06:03:21,709 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(1060): Cleaning up temporary data for 6789b821e00450848828fd44c6971ecb 2024-11-22T06:03:21,710 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(1093): writing seq id for 
6789b821e00450848828fd44c6971ecb 2024-11-22T06:03:21,711 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testConsecutiveExports/8d0b98c5d9fb22de88ee6daabe26e0bb/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T06:03:21,711 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(1114): Opened 8d0b98c5d9fb22de88ee6daabe26e0bb; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68781640, jitterRate=0.024926304817199707}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-22T06:03:21,711 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 8d0b98c5d9fb22de88ee6daabe26e0bb 2024-11-22T06:03:21,712 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(1006): Region open journal for 8d0b98c5d9fb22de88ee6daabe26e0bb: Running coprocessor pre-open hook at 1732255401701Writing region info on filesystem at 1732255401701Initializing all the Stores at 1732255401702 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732255401702Cleaning up temporary data from old regions at 1732255401706 (+4 ms)Running coprocessor post-open hooks at 1732255401711 (+5 ms)Region opened successfully at 1732255401712 (+1 ms) 2024-11-22T06:03:21,712 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testConsecutiveExports/6789b821e00450848828fd44c6971ecb/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T06:03:21,713 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testConsecutiveExports,,1732255401311.8d0b98c5d9fb22de88ee6daabe26e0bb., pid=79, masterSystemTime=1732255401695 2024-11-22T06:03:21,713 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(1114): Opened 6789b821e00450848828fd44c6971ecb; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67555514, jitterRate=0.006655603647232056}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-22T06:03:21,713 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 6789b821e00450848828fd44c6971ecb 2024-11-22T06:03:21,713 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(1006): Region open journal for 6789b821e00450848828fd44c6971ecb: Running coprocessor pre-open hook at 1732255401704Writing region info on filesystem at 1732255401704Initializing 
all the Stores at 1732255401705 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732255401705Cleaning up temporary data from old regions at 1732255401709 (+4 ms)Running coprocessor post-open hooks at 1732255401713 (+4 ms)Region opened successfully at 1732255401713 2024-11-22T06:03:21,714 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testConsecutiveExports,1,1732255401311.6789b821e00450848828fd44c6971ecb., pid=80, masterSystemTime=1732255401697 2024-11-22T06:03:21,716 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testConsecutiveExports,,1732255401311.8d0b98c5d9fb22de88ee6daabe26e0bb. 2024-11-22T06:03:21,716 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] handler.AssignRegionHandler(153): Opened testtb-testConsecutiveExports,,1732255401311.8d0b98c5d9fb22de88ee6daabe26e0bb. 2024-11-22T06:03:21,716 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=77 updating hbase:meta row=8d0b98c5d9fb22de88ee6daabe26e0bb, regionState=OPEN, openSeqNum=2, regionLocation=407411ea95db,35283,1732255321784 2024-11-22T06:03:21,717 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testConsecutiveExports,1,1732255401311.6789b821e00450848828fd44c6971ecb. 2024-11-22T06:03:21,717 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] handler.AssignRegionHandler(153): Opened testtb-testConsecutiveExports,1,1732255401311.6789b821e00450848828fd44c6971ecb. 
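
At this point both regions report "Opened ... next sequenceid=2". A small sketch, independent of the test code and assuming a default-configured connection, for listing from a client where the regions of the table ended up:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    // Illustrative sketch (not part of the test): print encoded region name -> server
    // for testtb-testConsecutiveExports after the OPEN events above.
    public class PrintRegionLocations {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             RegionLocator locator =
                 conn.getRegionLocator(TableName.valueOf("testtb-testConsecutiveExports"))) {
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
          }
        }
      }
    }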
2024-11-22T06:03:21,718 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=78 updating hbase:meta row=6789b821e00450848828fd44c6971ecb, regionState=OPEN, openSeqNum=2, regionLocation=407411ea95db,44619,1732255321573 2024-11-22T06:03:21,720 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=79, ppid=77, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8d0b98c5d9fb22de88ee6daabe26e0bb, server=407411ea95db,35283,1732255321784 because future has completed 2024-11-22T06:03:21,721 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=80, ppid=78, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6789b821e00450848828fd44c6971ecb, server=407411ea95db,44619,1732255321573 because future has completed 2024-11-22T06:03:21,723 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=79, resume processing ppid=77 2024-11-22T06:03:21,723 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=79, ppid=77, state=SUCCESS, hasLock=false; OpenRegionProcedure 8d0b98c5d9fb22de88ee6daabe26e0bb, server=407411ea95db,35283,1732255321784 in 178 msec 2024-11-22T06:03:21,724 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=80, resume processing ppid=78 2024-11-22T06:03:21,725 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=80, ppid=78, state=SUCCESS, hasLock=false; OpenRegionProcedure 6789b821e00450848828fd44c6971ecb, server=407411ea95db,44619,1732255321573 in 178 msec 2024-11-22T06:03:21,725 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=77, ppid=76, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=8d0b98c5d9fb22de88ee6daabe26e0bb, ASSIGN in 337 msec 2024-11-22T06:03:21,726 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=78, resume processing ppid=76 2024-11-22T06:03:21,727 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=78, ppid=76, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=6789b821e00450848828fd44c6971ecb, ASSIGN in 339 msec 2024-11-22T06:03:21,727 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=76, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-22T06:03:21,728 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732255401727"}]},"ts":"1732255401727"} 2024-11-22T06:03:21,729 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=ENABLED in hbase:meta 2024-11-22T06:03:21,730 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=76, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_POST_OPERATION 2024-11-22T06:03:21,730 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testConsecutiveExports jenkins: RWXCA 2024-11-22T06:03:21,734 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41799 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-11-22T06:03:21,824 
DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44619-0x101626c19a90001, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:03:21,824 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35283-0x101626c19a90003, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:03:21,824 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:03:21,825 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41799-0x101626c19a90002, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:03:21,835 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-11-22T06:03:21,835 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-11-22T06:03:21,835 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-11-22T06:03:21,835 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-11-22T06:03:21,837 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=76, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testConsecutiveExports in 523 msec 2024-11-22T06:03:21,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=76 2024-11-22T06:03:21,940 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testConsecutiveExports completed 2024-11-22T06:03:21,940 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testConsecutiveExports get assigned. Timeout = 60000ms 2024-11-22T06:03:21,940 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-22T06:03:21,945 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testConsecutiveExports assigned to meta. Checking AM states. 2024-11-22T06:03:21,945 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-22T06:03:21,945 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testConsecutiveExports assigned. 
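
The PermissionStorage and ZKPermissionWatcher entries above show the table ACL "jenkins: RWXCA" being written by the create-table post-operation and pushed to every watcher through the /hbase/acl znode. An explicit grant with the same effect could be issued from a client roughly as follows; this is a sketch only, since in this run the ACL is written automatically rather than by client code:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.Permission;

    // Hedged sketch: a table-level grant equivalent to the 'jenkins: RWXCA' entry above.
    public class GrantTablePermissions {
      public static void main(String[] args) throws Throwable {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
          AccessControlClient.grant(conn,
              TableName.valueOf("testtb-testConsecutiveExports"),
              "jenkins", null, null,            // null family/qualifier => whole table
              Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
              Permission.Action.CREATE, Permission.Action.ADMIN);
        }
      }
    }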
2024-11-22T06:03:21,945 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-22T06:03:21,950 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-11-22T06:03:21,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732255401950 (current time:1732255401950). 2024-11-22T06:03:21,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-22T06:03:21,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testConsecutiveExports VERSION not specified, setting to 2 2024-11-22T06:03:21,950 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testConsecutiveExports' 2024-11-22T06:03:21,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-22T06:03:21,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4a270baa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:03:21,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.ClusterIdFetcher(90): Going to request 407411ea95db,40429,-1 for getting cluster id 2024-11-22T06:03:21,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T06:03:21,952 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '6eeb2921-6f20-4de7-bcc0-81ebecd4007b' 2024-11-22T06:03:21,953 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T06:03:21,953 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "6eeb2921-6f20-4de7-bcc0-81ebecd4007b" 2024-11-22T06:03:21,953 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6d1f4412, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:03:21,953 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [407411ea95db,40429,-1] 2024-11-22T06:03:21,953 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T06:03:21,954 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:03:21,955 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43290, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T06:03:21,956 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@166a4270, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:03:21,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T06:03:21,957 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=407411ea95db,41799,1732255321718, seqNum=-1] 2024-11-22T06:03:21,957 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T06:03:21,958 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58394, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T06:03:21,960 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429. 2024-11-22T06:03:21,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-22T06:03:21,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:03:21,960 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
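
The master has received the snapshot request { ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } and is validating it (creation time, TTL, version, owner, ACLs). A minimal client-side sketch of issuing such a FLUSH snapshot, assuming the standard Admin API rather than the test's export utilities:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotDescription;
    import org.apache.hadoop.hbase.client.SnapshotType;

    // Illustrative sketch: take a FLUSH-type snapshot matching the request logged above.
    public class TakeFlushSnapshot {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.snapshot(new SnapshotDescription(
              "emptySnaptb0-testConsecutiveExports",
              TableName.valueOf("testtb-testConsecutiveExports"),
              SnapshotType.FLUSH));
        }
      }
    }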
2024-11-22T06:03:21,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:03:21,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@200069c0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:03:21,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.ClusterIdFetcher(90): Going to request 407411ea95db,40429,-1 for getting cluster id 2024-11-22T06:03:21,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T06:03:21,962 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '6eeb2921-6f20-4de7-bcc0-81ebecd4007b' 2024-11-22T06:03:21,962 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T06:03:21,962 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "6eeb2921-6f20-4de7-bcc0-81ebecd4007b" 2024-11-22T06:03:21,963 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6c28105b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:03:21,963 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [407411ea95db,40429,-1] 2024-11-22T06:03:21,963 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T06:03:21,963 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:03:21,964 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43300, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T06:03:21,965 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1cf13d33, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:03:21,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T06:03:21,967 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=407411ea95db,41799,1732255321718, seqNum=-1] 2024-11-22T06:03:21,968 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T06:03:21,969 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] 
ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58406, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T06:03:21,972 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testConsecutiveExports', locateType=CURRENT is [region=hbase:acl,,1732255324607.c2a1af531af5dfa747044be7409c1d27., hostname=407411ea95db,41799,1732255321718, seqNum=2] 2024-11-22T06:03:21,975 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429. 2024-11-22T06:03:21,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor220.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-22T06:03:21,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:03:21,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:03:21,975 INFO 
[Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-22T06:03:21,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-11-22T06:03:21,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-22T06:03:21,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] procedure2.ProcedureExecutor(1139): Stored pid=81, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-11-22T06:03:21,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 81 2024-11-22T06:03:21,979 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-22T06:03:21,981 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-22T06:03:21,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=81 2024-11-22T06:03:21,984 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-22T06:03:21,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742006_1182 (size=161) 2024-11-22T06:03:22,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742006_1182 (size=161) 2024-11-22T06:03:22,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742006_1182 (size=161) 2024-11-22T06:03:22,003 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-22T06:03:22,003 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=82, ppid=81, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 8d0b98c5d9fb22de88ee6daabe26e0bb}, {pid=83, ppid=81, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6789b821e00450848828fd44c6971ecb}] 
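The entries above show the master accepting a flush-type snapshot request for testtb-testConsecutiveExports, validating the SnapshotDescription, reading the table ACL, and storing SnapshotProcedure pid=81 with one SnapshotRegionProcedure per region (pid=82, pid=83). For orientation only, here is a minimal client-side sketch of requesting such a snapshot through the public Admin API; the class name and configuration are illustrative placeholders and are not part of the test log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class SnapshotRequestSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();  // picks up hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // For an enabled table this takes a flush-type snapshot (type=FLUSH in the log above)
      // and blocks until the master-side SnapshotProcedure reports completion.
      admin.snapshot("emptySnaptb0-testConsecutiveExports",
          TableName.valueOf("testtb-testConsecutiveExports"));
    }
  }
}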
2024-11-22T06:03:22,004 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=82, ppid=81, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 8d0b98c5d9fb22de88ee6daabe26e0bb 2024-11-22T06:03:22,004 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=83, ppid=81, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6789b821e00450848828fd44c6971ecb 2024-11-22T06:03:22,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=81 2024-11-22T06:03:22,156 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35283 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=82 2024-11-22T06:03:22,156 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44619 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=83 2024-11-22T06:03:22,157 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=83}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,1,1732255401311.6789b821e00450848828fd44c6971ecb. 2024-11-22T06:03:22,157 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=82}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,,1732255401311.8d0b98c5d9fb22de88ee6daabe26e0bb. 2024-11-22T06:03:22,157 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=83}] regionserver.HRegion(2603): Flush status journal for 6789b821e00450848828fd44c6971ecb: 2024-11-22T06:03:22,157 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=82}] regionserver.HRegion(2603): Flush status journal for 8d0b98c5d9fb22de88ee6daabe26e0bb: 2024-11-22T06:03:22,157 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=82}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,,1732255401311.8d0b98c5d9fb22de88ee6daabe26e0bb. for emptySnaptb0-testConsecutiveExports completed. 2024-11-22T06:03:22,157 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=83}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,1,1732255401311.6789b821e00450848828fd44c6971ecb. for emptySnaptb0-testConsecutiveExports completed. 2024-11-22T06:03:22,157 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=83}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,1,1732255401311.6789b821e00450848828fd44c6971ecb.' region-info for snapshot=emptySnaptb0-testConsecutiveExports 2024-11-22T06:03:22,157 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=82}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,,1732255401311.8d0b98c5d9fb22de88ee6daabe26e0bb.' 
region-info for snapshot=emptySnaptb0-testConsecutiveExports 2024-11-22T06:03:22,157 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=83}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-22T06:03:22,157 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=82}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-22T06:03:22,158 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=83}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-22T06:03:22,158 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=82}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-22T06:03:22,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742008_1184 (size=68) 2024-11-22T06:03:22,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742008_1184 (size=68) 2024-11-22T06:03:22,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742008_1184 (size=68) 2024-11-22T06:03:22,175 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=83}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,1,1732255401311.6789b821e00450848828fd44c6971ecb. 2024-11-22T06:03:22,176 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=83}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=83 2024-11-22T06:03:22,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.HMaster(4169): Remote procedure done, pid=83 2024-11-22T06:03:22,176 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testConsecutiveExports on region 6789b821e00450848828fd44c6971ecb 2024-11-22T06:03:22,177 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=83, ppid=81, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6789b821e00450848828fd44c6971ecb 2024-11-22T06:03:22,179 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=83, ppid=81, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 6789b821e00450848828fd44c6971ecb in 174 msec 2024-11-22T06:03:22,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742007_1183 (size=68) 2024-11-22T06:03:22,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742007_1183 (size=68) 2024-11-22T06:03:22,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742007_1183 (size=68) 2024-11-22T06:03:22,186 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=82}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,,1732255401311.8d0b98c5d9fb22de88ee6daabe26e0bb. 
2024-11-22T06:03:22,186 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=82 2024-11-22T06:03:22,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.HMaster(4169): Remote procedure done, pid=82 2024-11-22T06:03:22,186 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testConsecutiveExports on region 8d0b98c5d9fb22de88ee6daabe26e0bb 2024-11-22T06:03:22,187 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=82, ppid=81, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 8d0b98c5d9fb22de88ee6daabe26e0bb 2024-11-22T06:03:22,190 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=82, resume processing ppid=81 2024-11-22T06:03:22,190 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-22T06:03:22,190 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=82, ppid=81, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 8d0b98c5d9fb22de88ee6daabe26e0bb in 184 msec 2024-11-22T06:03:22,191 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-22T06:03:22,191 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-22T06:03:22,191 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testConsecutiveExports 2024-11-22T06:03:22,192 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/.tmp/emptySnaptb0-testConsecutiveExports 2024-11-22T06:03:22,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742009_1185 (size=543) 2024-11-22T06:03:22,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742009_1185 (size=543) 2024-11-22T06:03:22,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742009_1185 (size=543) 2024-11-22T06:03:22,211 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-22T06:03:22,218 INFO [PEWorker-1 {}] 
procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-22T06:03:22,219 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/.tmp/emptySnaptb0-testConsecutiveExports to hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/emptySnaptb0-testConsecutiveExports 2024-11-22T06:03:22,221 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-22T06:03:22,221 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 81 2024-11-22T06:03:22,223 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=81, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } in 245 msec 2024-11-22T06:03:22,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=81 2024-11-22T06:03:22,300 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testConsecutiveExports completed 2024-11-22T06:03:22,314 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='00701451e9b8ae5eaa35a0f3d5d9a4646', locateType=CURRENT is [region=testtb-testConsecutiveExports,,1732255401311.8d0b98c5d9fb22de88ee6daabe26e0bb., hostname=407411ea95db,35283,1732255321784, seqNum=2] 2024-11-22T06:03:22,323 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='1f9a78695e036d973ea2edf49416d2e2d', locateType=CURRENT is [region=testtb-testConsecutiveExports,1,1732255401311.6789b821e00450848828fd44c6971ecb., hostname=407411ea95db,44619,1732255321573, seqNum=2] 2024-11-22T06:03:22,325 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='353bedfb47744027a90ccb3b32d51e269', locateType=CURRENT is [region=testtb-testConsecutiveExports,1,1732255401311.6789b821e00450848828fd44c6971ecb., hostname=407411ea95db,44619,1732255321573, seqNum=2] 2024-11-22T06:03:22,331 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35283 {}] regionserver.HRegion(8528): writing data to region testtb-testConsecutiveExports,,1732255401311.8d0b98c5d9fb22de88ee6daabe26e0bb. with WAL disabled. Data may be lost in the event of a crash. 
2024-11-22T06:03:22,334 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44619 {}] regionserver.HRegion(8528): writing data to region testtb-testConsecutiveExports,1,1732255401311.6789b821e00450848828fd44c6971ecb. with WAL disabled. Data may be lost in the event of a crash. 2024-11-22T06:03:22,335 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='29815a08f4ed33bbf7eb9f2a822cd85d9', locateType=CURRENT is [region=testtb-testConsecutiveExports,1,1732255401311.6789b821e00450848828fd44c6971ecb., hostname=407411ea95db,44619,1732255321573, seqNum=2] 2024-11-22T06:03:22,337 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='4c6d6bcfea14b9c629b7262b36205f547', locateType=CURRENT is [region=testtb-testConsecutiveExports,1,1732255401311.6789b821e00450848828fd44c6971ecb., hostname=407411ea95db,44619,1732255321573, seqNum=2] 2024-11-22T06:03:22,338 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='5ed9a695ccc65f55877dd78ff5bfb5e76', locateType=CURRENT is [region=testtb-testConsecutiveExports,1,1732255401311.6789b821e00450848828fd44c6971ecb., hostname=407411ea95db,44619,1732255321573, seqNum=2] 2024-11-22T06:03:22,339 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='6bec696e0af1a12306d3994c2d71fdf17', locateType=CURRENT is [region=testtb-testConsecutiveExports,1,1732255401311.6789b821e00450848828fd44c6971ecb., hostname=407411ea95db,44619,1732255321573, seqNum=2] 2024-11-22T06:03:22,340 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='36968f35272f8ec4406a4faab2d14ad3', locateType=CURRENT is [region=testtb-testConsecutiveExports,1,1732255401311.6789b821e00450848828fd44c6971ecb., hostname=407411ea95db,44619,1732255321573, seqNum=2] 2024-11-22T06:03:22,341 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='d81ead3ff39fec3ee0b2245a2304b548', locateType=CURRENT is [region=testtb-testConsecutiveExports,1,1732255401311.6789b821e00450848828fd44c6971ecb., hostname=407411ea95db,44619,1732255321573, seqNum=2] 2024-11-22T06:03:22,343 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-22T06:03:22,347 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testConsecutiveExports 2024-11-22T06:03:22,347 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testConsecutiveExports,,1732255401311.8d0b98c5d9fb22de88ee6daabe26e0bb. 
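The "writing data to region ... with WAL disabled. Data may be lost in the event of a crash." warnings above are emitted while the test loads rows without write-ahead logging. As a hedged sketch, skipping the WAL on an individual mutation is one way to reach that code path; everything except the table name and the cf:q column (both visible in the log) is a placeholder.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalPutSketch {
  // 'conn' is assumed to be an already-open Connection; row key and value are made up.
  static void writeWithoutWal(Connection conn) throws Exception {
    try (Table table = conn.getTable(TableName.valueOf("testtb-testConsecutiveExports"))) {
      Put put = new Put(Bytes.toBytes("row-0001"));
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      put.setDurability(Durability.SKIP_WAL);  // mutation-level WAL skip, producing a warning like the one above
      table.put(put);
    }
  }
}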
2024-11-22T06:03:22,348 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-22T06:03:22,351 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-22T06:03:22,359 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-22T06:03:22,369 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-22T06:03:22,373 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-11-22T06:03:22,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732255402373 (current time:1732255402373). 2024-11-22T06:03:22,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-22T06:03:22,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testConsecutiveExports VERSION not specified, setting to 2 2024-11-22T06:03:22,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-22T06:03:22,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@57fbb48, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:03:22,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.ClusterIdFetcher(90): Going to request 407411ea95db,40429,-1 for getting cluster id 2024-11-22T06:03:22,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T06:03:22,375 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '6eeb2921-6f20-4de7-bcc0-81ebecd4007b' 2024-11-22T06:03:22,376 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T06:03:22,376 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "6eeb2921-6f20-4de7-bcc0-81ebecd4007b" 2024-11-22T06:03:22,376 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2816d3c6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 
2024-11-22T06:03:22,376 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [407411ea95db,40429,-1] 2024-11-22T06:03:22,376 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T06:03:22,377 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:03:22,377 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43316, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T06:03:22,378 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4d3b7194, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:03:22,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T06:03:22,380 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=407411ea95db,41799,1732255321718, seqNum=-1] 2024-11-22T06:03:22,380 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T06:03:22,381 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58418, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T06:03:22,383 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429. 
2024-11-22T06:03:22,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-22T06:03:22,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:03:22,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:03:22,383 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-22T06:03:22,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6a31724a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:03:22,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.ClusterIdFetcher(90): Going to request 407411ea95db,40429,-1 for getting cluster id 2024-11-22T06:03:22,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T06:03:22,385 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '6eeb2921-6f20-4de7-bcc0-81ebecd4007b' 2024-11-22T06:03:22,385 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T06:03:22,385 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "6eeb2921-6f20-4de7-bcc0-81ebecd4007b" 2024-11-22T06:03:22,386 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@65324603, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:03:22,386 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [407411ea95db,40429,-1] 2024-11-22T06:03:22,386 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T06:03:22,386 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:03:22,387 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43342, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T06:03:22,388 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@60d03811, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:03:22,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T06:03:22,390 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=407411ea95db,41799,1732255321718, seqNum=-1] 2024-11-22T06:03:22,390 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T06:03:22,391 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58426, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T06:03:22,393 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testConsecutiveExports', locateType=CURRENT is [region=hbase:acl,,1732255324607.c2a1af531af5dfa747044be7409c1d27., hostname=407411ea95db,41799,1732255321718, seqNum=2] 2024-11-22T06:03:22,396 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429. 
2024-11-22T06:03:22,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor220.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-22T06:03:22,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:03:22,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:03:22,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-11-22T06:03:22,396 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-22T06:03:22,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
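Right before registering SnapshotProcedure pid=84, SnapshotManager logs "No existing snapshot, attempting snapshot...", i.e. no snapshot of that name is already registered or in flight. A rough client-side analogue of the by-name existence check, using Admin.listSnapshots(), could look like the following; the helper name is invented for illustration and this is not the master's actual implementation.

import java.util.List;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public class SnapshotExistsSketch {
  // Returns true if a completed snapshot with the given name is already registered on the master.
  static boolean snapshotExists(Admin admin, String snapshotName) throws Exception {
    List<SnapshotDescription> snapshots = admin.listSnapshots();
    return snapshots.stream().anyMatch(s -> snapshotName.equals(s.getName()));
  }
}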
2024-11-22T06:03:22,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] procedure2.ProcedureExecutor(1139): Stored pid=84, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-11-22T06:03:22,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 84 2024-11-22T06:03:22,400 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-22T06:03:22,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=84 2024-11-22T06:03:22,402 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-22T06:03:22,405 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-22T06:03:22,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742010_1186 (size=156) 2024-11-22T06:03:22,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742010_1186 (size=156) 2024-11-22T06:03:22,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742010_1186 (size=156) 2024-11-22T06:03:22,429 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-22T06:03:22,429 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=85, ppid=84, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 8d0b98c5d9fb22de88ee6daabe26e0bb}, {pid=86, ppid=84, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6789b821e00450848828fd44c6971ecb}] 2024-11-22T06:03:22,430 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=86, ppid=84, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6789b821e00450848828fd44c6971ecb 2024-11-22T06:03:22,431 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=85, ppid=84, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 8d0b98c5d9fb22de88ee6daabe26e0bb 2024-11-22T06:03:22,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] 
master.MasterRpcServices(1377): Checking to see if procedure is done pid=84 2024-11-22T06:03:22,584 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35283 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=85 2024-11-22T06:03:22,584 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,,1732255401311.8d0b98c5d9fb22de88ee6daabe26e0bb. 2024-11-22T06:03:22,584 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.HRegion(2902): Flushing 8d0b98c5d9fb22de88ee6daabe26e0bb 1/1 column families, dataSize=266 B heapSize=832 B 2024-11-22T06:03:22,585 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44619 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=86 2024-11-22T06:03:22,585 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,1,1732255401311.6789b821e00450848828fd44c6971ecb. 2024-11-22T06:03:22,585 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.HRegion(2902): Flushing 6789b821e00450848828fd44c6971ecb 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-11-22T06:03:22,634 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testConsecutiveExports/8d0b98c5d9fb22de88ee6daabe26e0bb/.tmp/cf/02eab4ec2d894a3c82a094051683553d is 71, key is 05d0287553b98d8a3c5a8e051410ac2c/cf:q/1732255402331/Put/seqid=0 2024-11-22T06:03:22,635 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testConsecutiveExports/6789b821e00450848828fd44c6971ecb/.tmp/cf/031556690a7b41699be1130f8db0a966 is 71, key is 180e09493e878c1d065e548d230892be/cf:q/1732255402333/Put/seqid=0 2024-11-22T06:03:22,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742011_1187 (size=5356) 2024-11-22T06:03:22,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742011_1187 (size=5356) 2024-11-22T06:03:22,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742011_1187 (size=5356) 2024-11-22T06:03:22,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742012_1188 (size=8256) 2024-11-22T06:03:22,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742012_1188 (size=8256) 2024-11-22T06:03:22,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742012_1188 
(size=8256) 2024-11-22T06:03:22,675 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.00 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testConsecutiveExports/6789b821e00450848828fd44c6971ecb/.tmp/cf/031556690a7b41699be1130f8db0a966 2024-11-22T06:03:22,686 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testConsecutiveExports/6789b821e00450848828fd44c6971ecb/.tmp/cf/031556690a7b41699be1130f8db0a966 as hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testConsecutiveExports/6789b821e00450848828fd44c6971ecb/cf/031556690a7b41699be1130f8db0a966 2024-11-22T06:03:22,700 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testConsecutiveExports/6789b821e00450848828fd44c6971ecb/cf/031556690a7b41699be1130f8db0a966, entries=46, sequenceid=6, filesize=8.1 K 2024-11-22T06:03:22,701 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.HRegion(3140): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for 6789b821e00450848828fd44c6971ecb in 116ms, sequenceid=6, compaction requested=false 2024-11-22T06:03:22,701 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.HRegion(2603): Flush status journal for 6789b821e00450848828fd44c6971ecb: 2024-11-22T06:03:22,701 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,1,1732255401311.6789b821e00450848828fd44c6971ecb. for snaptb0-testConsecutiveExports completed. 2024-11-22T06:03:22,701 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,1,1732255401311.6789b821e00450848828fd44c6971ecb.' 
region-info for snapshot=snaptb0-testConsecutiveExports 2024-11-22T06:03:22,702 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-22T06:03:22,702 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testConsecutiveExports/6789b821e00450848828fd44c6971ecb/cf/031556690a7b41699be1130f8db0a966] hfiles 2024-11-22T06:03:22,702 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testConsecutiveExports/6789b821e00450848828fd44c6971ecb/cf/031556690a7b41699be1130f8db0a966 for snapshot=snaptb0-testConsecutiveExports 2024-11-22T06:03:22,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742013_1189 (size=107) 2024-11-22T06:03:22,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=84 2024-11-22T06:03:22,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742013_1189 (size=107) 2024-11-22T06:03:22,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742013_1189 (size=107) 2024-11-22T06:03:22,725 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,1,1732255401311.6789b821e00450848828fd44c6971ecb. 
2024-11-22T06:03:22,725 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=86 2024-11-22T06:03:22,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.HMaster(4169): Remote procedure done, pid=86 2024-11-22T06:03:22,726 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testConsecutiveExports on region 6789b821e00450848828fd44c6971ecb 2024-11-22T06:03:22,726 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=86, ppid=84, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6789b821e00450848828fd44c6971ecb 2024-11-22T06:03:22,729 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=86, ppid=84, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 6789b821e00450848828fd44c6971ecb in 298 msec 2024-11-22T06:03:23,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=84 2024-11-22T06:03:23,054 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=266 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testConsecutiveExports/8d0b98c5d9fb22de88ee6daabe26e0bb/.tmp/cf/02eab4ec2d894a3c82a094051683553d 2024-11-22T06:03:23,066 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testConsecutiveExports/8d0b98c5d9fb22de88ee6daabe26e0bb/.tmp/cf/02eab4ec2d894a3c82a094051683553d as hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testConsecutiveExports/8d0b98c5d9fb22de88ee6daabe26e0bb/cf/02eab4ec2d894a3c82a094051683553d 2024-11-22T06:03:23,083 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testConsecutiveExports/8d0b98c5d9fb22de88ee6daabe26e0bb/cf/02eab4ec2d894a3c82a094051683553d, entries=4, sequenceid=6, filesize=5.2 K 2024-11-22T06:03:23,084 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.HRegion(3140): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for 8d0b98c5d9fb22de88ee6daabe26e0bb in 500ms, sequenceid=6, compaction requested=false 2024-11-22T06:03:23,084 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.HRegion(2603): Flush status journal for 8d0b98c5d9fb22de88ee6daabe26e0bb: 2024-11-22T06:03:23,085 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,,1732255401311.8d0b98c5d9fb22de88ee6daabe26e0bb. for snaptb0-testConsecutiveExports completed. 
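Because snaptb0-testConsecutiveExports is a FLUSH-type snapshot, each SnapshotRegionCallable above first flushes the region's memstore (producing the cf/031556690a7b41699be1130f8db0a966 and cf/02eab4ec2d894a3c82a094051683553d hfiles) before references are recorded in the snapshot manifest. A client can force the same flush explicitly; a minimal sketch, assuming an already-open Admin handle, is:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public class ExplicitFlushSketch {
  // Flushes every region of the table, turning memstore contents into hfiles
  // like the ones referenced by the snapshot manifest above.
  static void flushTable(Admin admin) throws Exception {
    admin.flush(TableName.valueOf("testtb-testConsecutiveExports"));
  }
}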
2024-11-22T06:03:23,085 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,,1732255401311.8d0b98c5d9fb22de88ee6daabe26e0bb.' region-info for snapshot=snaptb0-testConsecutiveExports 2024-11-22T06:03:23,085 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-22T06:03:23,086 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testConsecutiveExports/8d0b98c5d9fb22de88ee6daabe26e0bb/cf/02eab4ec2d894a3c82a094051683553d] hfiles 2024-11-22T06:03:23,086 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testConsecutiveExports/8d0b98c5d9fb22de88ee6daabe26e0bb/cf/02eab4ec2d894a3c82a094051683553d for snapshot=snaptb0-testConsecutiveExports 2024-11-22T06:03:23,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742014_1190 (size=107) 2024-11-22T06:03:23,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742014_1190 (size=107) 2024-11-22T06:03:23,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742014_1190 (size=107) 2024-11-22T06:03:23,105 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,,1732255401311.8d0b98c5d9fb22de88ee6daabe26e0bb. 
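The log entries above trace the per-region snapshot path: each region is flushed, a region manifest with its hfile references is written under the snapshot's .tmp directory, and the region server reports the SnapshotRegionProcedure result back to the master. As a point of reference, the following is a minimal, hedged sketch (not the test's own code) of how the client side triggers this flow with the public Admin API; the connection configuration is a placeholder, while the snapshot and table names are taken from the log.

```java
// Minimal sketch, assuming default client configuration; illustrative only.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeSnapshotExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // A FLUSH-type snapshot forces each region to flush its memstore before
      // the per-region manifest (region-info plus hfile references) is written,
      // which corresponds to the SnapshotRegionProcedure activity logged above.
      admin.snapshot("snaptb0-testConsecutiveExports",
          TableName.valueOf("testtb-testConsecutiveExports"),
          SnapshotType.FLUSH);
    }
  }
}
```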
2024-11-22T06:03:23,105 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=85 2024-11-22T06:03:23,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.HMaster(4169): Remote procedure done, pid=85 2024-11-22T06:03:23,107 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testConsecutiveExports on region 8d0b98c5d9fb22de88ee6daabe26e0bb 2024-11-22T06:03:23,107 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=85, ppid=84, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 8d0b98c5d9fb22de88ee6daabe26e0bb 2024-11-22T06:03:23,111 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=85, resume processing ppid=84 2024-11-22T06:03:23,111 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-22T06:03:23,111 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=85, ppid=84, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 8d0b98c5d9fb22de88ee6daabe26e0bb in 679 msec 2024-11-22T06:03:23,113 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-22T06:03:23,115 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-22T06:03:23,115 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testConsecutiveExports 2024-11-22T06:03:23,116 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-11-22T06:03:23,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742015_1191 (size=621) 2024-11-22T06:03:23,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742015_1191 (size=621) 2024-11-22T06:03:23,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742015_1191 (size=621) 2024-11-22T06:03:23,139 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-22T06:03:23,166 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): 
pid=84, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-22T06:03:23,167 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports to hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-11-22T06:03:23,170 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-22T06:03:23,171 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 84 2024-11-22T06:03:23,173 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=84, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } in 774 msec 2024-11-22T06:03:23,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=84 2024-11-22T06:03:23,540 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testConsecutiveExports completed 2024-11-22T06:03:23,540 INFO [Time-limited test {}] snapshot.TestExportSnapshot(475): Local export destination path: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/local-export-1732255403540 2024-11-22T06:03:23,540 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/local-export-1732255403540, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/local-export-1732255403540, srcFsUri=hdfs://localhost:33847, srcDir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788 2024-11-22T06:03:23,568 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:33847, inputRoot=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788 2024-11-22T06:03:23,568 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=org.apache.hadoop.fs.LocalFileSystem@888ac35, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/local-export-1732255403540, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/local-export-1732255403540/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 
2024-11-22T06:03:23,570 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 2024-11-22T06:03:23,574 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/snaptb0-testConsecutiveExports to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/local-export-1732255403540/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-11-22T06:03:23,618 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:03:23,618 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:03:23,618 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:03:24,799 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/hadoop-16147552560308526162.jar 2024-11-22T06:03:24,799 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:03:24,800 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:03:24,868 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/hadoop-15363738143774462128.jar 2024-11-22T06:03:24,869 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:03:24,869 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:03:24,870 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:03:24,870 DEBUG 
[Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:03:24,870 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:03:24,871 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:03:24,871 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-22T06:03:24,871 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-22T06:03:24,872 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-22T06:03:24,872 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-22T06:03:24,872 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-22T06:03:24,872 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-22T06:03:24,873 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-22T06:03:24,873 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-22T06:03:24,873 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-22T06:03:24,873 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-22T06:03:24,874 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-22T06:03:24,874 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-22T06:03:24,874 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-22T06:03:24,874 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-22T06:03:24,875 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-22T06:03:24,875 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-22T06:03:24,876 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-22T06:03:24,876 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-22T06:03:24,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742016_1192 (size=131440) 2024-11-22T06:03:24,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742016_1192 (size=131440) 2024-11-22T06:03:24,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742016_1192 (size=131440) 2024-11-22T06:03:24,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:43345 is added to blk_1073742017_1193 (size=4188619) 2024-11-22T06:03:24,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742017_1193 (size=4188619) 2024-11-22T06:03:24,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742017_1193 (size=4188619) 2024-11-22T06:03:25,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742018_1194 (size=1323991) 2024-11-22T06:03:25,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742018_1194 (size=1323991) 2024-11-22T06:03:25,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742018_1194 (size=1323991) 2024-11-22T06:03:25,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742019_1195 (size=440656) 2024-11-22T06:03:25,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742019_1195 (size=440656) 2024-11-22T06:03:25,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742019_1195 (size=440656) 2024-11-22T06:03:25,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742020_1196 (size=903736) 2024-11-22T06:03:25,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742020_1196 (size=903736) 2024-11-22T06:03:25,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742020_1196 (size=903736) 2024-11-22T06:03:25,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742021_1197 (size=8360083) 2024-11-22T06:03:25,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742021_1197 (size=8360083) 2024-11-22T06:03:25,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742021_1197 (size=8360083) 2024-11-22T06:03:25,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742022_1198 (size=1877034) 2024-11-22T06:03:25,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742022_1198 (size=1877034) 2024-11-22T06:03:25,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742022_1198 (size=1877034) 2024-11-22T06:03:25,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742023_1199 (size=77835) 2024-11-22T06:03:25,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742023_1199 (size=77835) 2024-11-22T06:03:25,086 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742023_1199 (size=77835) 2024-11-22T06:03:25,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742024_1200 (size=30949) 2024-11-22T06:03:25,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742024_1200 (size=30949) 2024-11-22T06:03:25,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742024_1200 (size=30949) 2024-11-22T06:03:25,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742025_1201 (size=1597347) 2024-11-22T06:03:25,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742025_1201 (size=1597347) 2024-11-22T06:03:25,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742025_1201 (size=1597347) 2024-11-22T06:03:25,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742026_1202 (size=4695811) 2024-11-22T06:03:25,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742026_1202 (size=4695811) 2024-11-22T06:03:25,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742026_1202 (size=4695811) 2024-11-22T06:03:25,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742027_1203 (size=232957) 2024-11-22T06:03:25,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742027_1203 (size=232957) 2024-11-22T06:03:25,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742027_1203 (size=232957) 2024-11-22T06:03:25,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742028_1204 (size=127628) 2024-11-22T06:03:25,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742028_1204 (size=127628) 2024-11-22T06:03:25,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742028_1204 (size=127628) 2024-11-22T06:03:25,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742029_1205 (size=20406) 2024-11-22T06:03:25,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742029_1205 (size=20406) 2024-11-22T06:03:25,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742029_1205 (size=20406) 2024-11-22T06:03:25,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742030_1206 (size=5175431) 2024-11-22T06:03:25,214 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742030_1206 (size=5175431) 2024-11-22T06:03:25,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742030_1206 (size=5175431) 2024-11-22T06:03:25,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742031_1207 (size=217634) 2024-11-22T06:03:25,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742031_1207 (size=217634) 2024-11-22T06:03:25,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742031_1207 (size=217634) 2024-11-22T06:03:25,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742032_1208 (size=1832290) 2024-11-22T06:03:25,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742032_1208 (size=1832290) 2024-11-22T06:03:25,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742032_1208 (size=1832290) 2024-11-22T06:03:25,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742033_1209 (size=322274) 2024-11-22T06:03:25,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742033_1209 (size=322274) 2024-11-22T06:03:25,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742033_1209 (size=322274) 2024-11-22T06:03:25,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742034_1210 (size=503880) 2024-11-22T06:03:25,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742034_1210 (size=503880) 2024-11-22T06:03:25,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742034_1210 (size=503880) 2024-11-22T06:03:25,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742035_1211 (size=6424744) 2024-11-22T06:03:25,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742035_1211 (size=6424744) 2024-11-22T06:03:25,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742035_1211 (size=6424744) 2024-11-22T06:03:25,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742036_1212 (size=29229) 2024-11-22T06:03:25,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742036_1212 (size=29229) 2024-11-22T06:03:25,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742036_1212 (size=29229) 2024-11-22T06:03:25,322 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742037_1213 (size=24096) 2024-11-22T06:03:25,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742037_1213 (size=24096) 2024-11-22T06:03:25,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742037_1213 (size=24096) 2024-11-22T06:03:25,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742038_1214 (size=111872) 2024-11-22T06:03:25,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742038_1214 (size=111872) 2024-11-22T06:03:25,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742038_1214 (size=111872) 2024-11-22T06:03:25,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742039_1215 (size=45609) 2024-11-22T06:03:25,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742039_1215 (size=45609) 2024-11-22T06:03:25,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742039_1215 (size=45609) 2024-11-22T06:03:25,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742040_1216 (size=136454) 2024-11-22T06:03:25,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742040_1216 (size=136454) 2024-11-22T06:03:25,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742040_1216 (size=136454) 2024-11-22T06:03:25,358 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
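The long run of "For class X, using jar Y" DEBUG lines above comes from TableMapReduceUtil resolving, for every class the export job needs (HBase client and server modules, the shaded thirdparty jars, ZooKeeper, metrics, OpenTelemetry, Hadoop), which jar provides it so those jars can be shipped with the MapReduce job; the subsequent addStoredBlock lines are HDFS replicating those uploaded jars. A minimal, assumed sketch of the driver-side call that produces this behavior (job name and classes here are placeholders, not the test's own code):

```java
// Sketch of shipping HBase dependency jars with a MapReduce job.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class DependencyJarsExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "export-snapshot-example");
    // Setting the job jar avoids the "No job jar file set" warning from
    // JobResourceUploader seen in the log; the export job appears to rely on
    // dependency jars instead.
    job.setJarByClass(DependencyJarsExample.class);
    // Resolves the jar that provides each required class and adds it to the
    // job's distributed cache so map tasks can load it at runtime.
    TableMapReduceUtil.addDependencyJars(job);
  }
}
```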
2024-11-22T06:03:25,361 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'snaptb0-testConsecutiveExports' hfile list 2024-11-22T06:03:25,363 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(754): export split=0 size=13.3 K 2024-11-22T06:03:25,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742041_1217 (size=338) 2024-11-22T06:03:25,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742041_1217 (size=338) 2024-11-22T06:03:25,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742041_1217 (size=338) 2024-11-22T06:03:25,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742042_1218 (size=15) 2024-11-22T06:03:25,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742042_1218 (size=15) 2024-11-22T06:03:25,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742042_1218 (size=15) 2024-11-22T06:03:25,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742043_1219 (size=303781) 2024-11-22T06:03:25,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742043_1219 (size=303781) 2024-11-22T06:03:25,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742043_1219 (size=303781) 2024-11-22T06:03:25,696 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-22T06:03:25,696 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-22T06:03:25,701 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732255328437_0003_000001 (auth:SIMPLE) from 127.0.0.1:54498 2024-11-22T06:03:25,720 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1754703613/yarn-5946224054/MiniMRCluster_1754703613-localDir-nm-1_0/usercache/jenkins/appcache/application_1732255328437_0003/container_1732255328437_0003_01_000001/launch_container.sh] 2024-11-22T06:03:25,720 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1754703613/yarn-5946224054/MiniMRCluster_1754703613-localDir-nm-1_0/usercache/jenkins/appcache/application_1732255328437_0003/container_1732255328437_0003_01_000001/container_tokens] 2024-11-22T06:03:25,720 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1754703613/yarn-5946224054/MiniMRCluster_1754703613-localDir-nm-1_0/usercache/jenkins/appcache/application_1732255328437_0003/container_1732255328437_0003_01_000001/sysfs] 2024-11-22T06:03:26,200 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732255328437_0004_000001 (auth:SIMPLE) from 127.0.0.1:59584 2024-11-22T06:03:26,502 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-22T06:03:29,721 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-22T06:03:31,126 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports 2024-11-22T06:03:31,126 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports Metrics about Tables on a single HBase RegionServer 2024-11-22T06:03:32,026 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732255328437_0004_000001 (auth:SIMPLE) from 127.0.0.1:49100 2024-11-22T06:03:32,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742044_1220 (size=349431) 2024-11-22T06:03:32,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742044_1220 (size=349431) 2024-11-22T06:03:32,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742044_1220 (size=349431) 2024-11-22T06:03:34,300 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732255328437_0004_000001 (auth:SIMPLE) from 127.0.0.1:36710 2024-11-22T06:03:37,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742045_1221 (size=17447) 2024-11-22T06:03:37,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742045_1221 (size=17447) 2024-11-22T06:03:37,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742045_1221 (size=17447) 2024-11-22T06:03:37,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742046_1222 (size=462) 2024-11-22T06:03:37,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742046_1222 (size=462) 2024-11-22T06:03:37,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742046_1222 (size=462) 2024-11-22T06:03:37,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742047_1223 (size=17447) 2024-11-22T06:03:37,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742047_1223 (size=17447) 2024-11-22T06:03:37,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742047_1223 (size=17447) 2024-11-22T06:03:37,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742048_1224 (size=349431) 2024-11-22T06:03:37,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742048_1224 (size=349431) 2024-11-22T06:03:37,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742048_1224 (size=349431) 2024-11-22T06:03:37,261 WARN [ContainersLauncher #1 {}] 
nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1754703613/yarn-5946224054/MiniMRCluster_1754703613-localDir-nm-0_2/usercache/jenkins/appcache/application_1732255328437_0004/container_1732255328437_0004_01_000002/launch_container.sh] 2024-11-22T06:03:37,261 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1754703613/yarn-5946224054/MiniMRCluster_1754703613-localDir-nm-0_2/usercache/jenkins/appcache/application_1732255328437_0004/container_1732255328437_0004_01_000002/container_tokens] 2024-11-22T06:03:37,261 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1754703613/yarn-5946224054/MiniMRCluster_1754703613-localDir-nm-0_2/usercache/jenkins/appcache/application_1732255328437_0004/container_1732255328437_0004_01_000002/sysfs] 2024-11-22T06:03:37,270 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732255328437_0004_000001 (auth:SIMPLE) from 127.0.0.1:36724 2024-11-22T06:03:38,609 INFO [Time-limited test {}] snapshot.ExportSnapshot(1210): Finalize the Snapshot Export 2024-11-22T06:03:38,609 INFO [Time-limited test {}] snapshot.ExportSnapshot(1221): Verify the exported snapshot's expiration status and integrity. 2024-11-22T06:03:38,612 INFO [Time-limited test {}] snapshot.ExportSnapshot(1227): Export Completed: snaptb0-testConsecutiveExports 2024-11-22T06:03:38,612 INFO [Time-limited test {}] snapshot.TestExportSnapshot(361): Exported snapshot 2024-11-22T06:03:38,612 INFO [Time-limited test {}] snapshot.TestExportSnapshot(372): Verified filesystem state 2024-11-22T06:03:38,612 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-2145612548_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/snaptb0-testConsecutiveExports at hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-11-22T06:03:38,613 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-11-22T06:03:38,613 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-11-22T06:03:38,613 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in org.apache.hadoop.fs.LocalFileSystem@888ac35 in root file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/local-export-1732255403540/.hbase-snapshot/snaptb0-testConsecutiveExports at file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/local-export-1732255403540/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-11-22T06:03:38,613 DEBUG [Time-limited test 
{}] snapshot.TestExportSnapshot(452): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/local-export-1732255403540/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-11-22T06:03:38,613 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/local-export-1732255403540/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-11-22T06:03:38,614 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/local-export-1732255403540, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/local-export-1732255403540, srcFsUri=hdfs://localhost:33847, srcDir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788 2024-11-22T06:03:38,639 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:33847, inputRoot=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788 2024-11-22T06:03:38,639 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=org.apache.hadoop.fs.LocalFileSystem@888ac35, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/local-export-1732255403540, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/local-export-1732255403540/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-11-22T06:03:38,641 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 
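The first export has now completed and been verified (the .snapshotinfo and data.manifest files are listed on both the source HDFS and the local target), and the test immediately begins a second export of the same snapshot to the same local directory, which is the "consecutive exports" scenario under test. Purely as an illustration of the kind of check the listing lines above correspond to (paths are placeholders; this is not TestExportSnapshot's code), an exported snapshot directory can be inspected with the Hadoop FileSystem API:

```java
// Sketch: list an exported snapshot directory and expect at least
// .snapshotinfo and data.manifest to be present.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class VerifyExportExample {
  public static void main(String[] args) throws Exception {
    Path exported = new Path(
        "file:///tmp/local-export-example/.hbase-snapshot/snaptb0-testConsecutiveExports");
    FileSystem fs = exported.getFileSystem(new Configuration());
    for (FileStatus status : fs.listStatus(exported)) {
      // The referenced region hfiles live under the exported data directory
      // described by data.manifest.
      System.out.println(status.getPath());
    }
  }
}
```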
2024-11-22T06:03:38,646 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/snaptb0-testConsecutiveExports to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/local-export-1732255403540/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-11-22T06:03:38,661 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:03:38,662 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:03:38,662 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:03:39,683 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/hadoop-4482879422672658068.jar 2024-11-22T06:03:39,683 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:03:39,684 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:03:39,757 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/hadoop-9406050611628921532.jar 2024-11-22T06:03:39,758 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:03:39,758 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:03:39,758 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:03:39,759 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:03:39,759 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:03:39,759 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:03:39,759 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-22T06:03:39,760 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-22T06:03:39,760 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-22T06:03:39,760 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-22T06:03:39,761 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-22T06:03:39,761 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-22T06:03:39,761 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-22T06:03:39,761 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-22T06:03:39,762 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-22T06:03:39,762 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-22T06:03:39,762 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-22T06:03:39,762 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-22T06:03:39,763 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-22T06:03:39,763 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-22T06:03:39,763 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-22T06:03:39,763 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-22T06:03:39,763 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-22T06:03:39,764 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-22T06:03:39,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742049_1225 (size=131440) 2024-11-22T06:03:39,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742049_1225 (size=131440) 2024-11-22T06:03:39,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742049_1225 (size=131440) 2024-11-22T06:03:39,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742050_1226 (size=4188619) 2024-11-22T06:03:39,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:43345 is added to blk_1073742050_1226 (size=4188619) 2024-11-22T06:03:39,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742050_1226 (size=4188619) 2024-11-22T06:03:39,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742051_1227 (size=1323991) 2024-11-22T06:03:39,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742051_1227 (size=1323991) 2024-11-22T06:03:39,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742051_1227 (size=1323991) 2024-11-22T06:03:39,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742052_1228 (size=903736) 2024-11-22T06:03:39,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742052_1228 (size=903736) 2024-11-22T06:03:39,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742052_1228 (size=903736) 2024-11-22T06:03:39,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742053_1229 (size=8360083) 2024-11-22T06:03:39,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742053_1229 (size=8360083) 2024-11-22T06:03:39,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742053_1229 (size=8360083) 2024-11-22T06:03:39,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742054_1230 (size=1877034) 2024-11-22T06:03:39,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742054_1230 (size=1877034) 2024-11-22T06:03:39,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742054_1230 (size=1877034) 2024-11-22T06:03:39,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742055_1231 (size=77835) 2024-11-22T06:03:39,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742055_1231 (size=77835) 2024-11-22T06:03:39,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742055_1231 (size=77835) 2024-11-22T06:03:39,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742056_1232 (size=30949) 2024-11-22T06:03:39,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742056_1232 (size=30949) 2024-11-22T06:03:39,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742056_1232 (size=30949) 2024-11-22T06:03:39,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:43345 is added to blk_1073742057_1233 (size=1597347) 2024-11-22T06:03:39,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742057_1233 (size=1597347) 2024-11-22T06:03:39,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742057_1233 (size=1597347) 2024-11-22T06:03:39,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742058_1234 (size=440656) 2024-11-22T06:03:39,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742058_1234 (size=440656) 2024-11-22T06:03:39,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742058_1234 (size=440656) 2024-11-22T06:03:39,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742059_1235 (size=4695811) 2024-11-22T06:03:39,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742059_1235 (size=4695811) 2024-11-22T06:03:39,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742059_1235 (size=4695811) 2024-11-22T06:03:39,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742060_1236 (size=232957) 2024-11-22T06:03:39,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742060_1236 (size=232957) 2024-11-22T06:03:39,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742060_1236 (size=232957) 2024-11-22T06:03:39,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742061_1237 (size=127628) 2024-11-22T06:03:39,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742061_1237 (size=127628) 2024-11-22T06:03:39,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742061_1237 (size=127628) 2024-11-22T06:03:40,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742062_1238 (size=20406) 2024-11-22T06:03:40,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742062_1238 (size=20406) 2024-11-22T06:03:40,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742062_1238 (size=20406) 2024-11-22T06:03:40,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742063_1239 (size=5175431) 2024-11-22T06:03:40,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742063_1239 (size=5175431) 2024-11-22T06:03:40,021 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742063_1239 (size=5175431) 2024-11-22T06:03:40,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742064_1240 (size=217634) 2024-11-22T06:03:40,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742064_1240 (size=217634) 2024-11-22T06:03:40,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742064_1240 (size=217634) 2024-11-22T06:03:40,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742065_1241 (size=1832290) 2024-11-22T06:03:40,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742065_1241 (size=1832290) 2024-11-22T06:03:40,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742065_1241 (size=1832290) 2024-11-22T06:03:40,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742066_1242 (size=322274) 2024-11-22T06:03:40,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742066_1242 (size=322274) 2024-11-22T06:03:40,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742066_1242 (size=322274) 2024-11-22T06:03:40,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742067_1243 (size=503880) 2024-11-22T06:03:40,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742067_1243 (size=503880) 2024-11-22T06:03:40,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742067_1243 (size=503880) 2024-11-22T06:03:40,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742068_1244 (size=29229) 2024-11-22T06:03:40,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742068_1244 (size=29229) 2024-11-22T06:03:40,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742068_1244 (size=29229) 2024-11-22T06:03:40,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742069_1245 (size=24096) 2024-11-22T06:03:40,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742069_1245 (size=24096) 2024-11-22T06:03:40,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742069_1245 (size=24096) 2024-11-22T06:03:40,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742070_1246 (size=6424744) 2024-11-22T06:03:40,514 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742070_1246 (size=6424744) 2024-11-22T06:03:40,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742070_1246 (size=6424744) 2024-11-22T06:03:40,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742071_1247 (size=111872) 2024-11-22T06:03:40,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742071_1247 (size=111872) 2024-11-22T06:03:40,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742071_1247 (size=111872) 2024-11-22T06:03:40,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742072_1248 (size=45609) 2024-11-22T06:03:40,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742072_1248 (size=45609) 2024-11-22T06:03:40,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742072_1248 (size=45609) 2024-11-22T06:03:40,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742073_1249 (size=136454) 2024-11-22T06:03:40,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742073_1249 (size=136454) 2024-11-22T06:03:40,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742073_1249 (size=136454) 2024-11-22T06:03:40,535 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
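The TableMapReduceUtil(972) entries above show the export job resolving one jar per dependency class (job input/output formats, key/value classes, HBase modules and the shaded third-party artifacts) before the resolved jars are written to HDFS, which is what the addStoredBlock entries that follow record; the JobResourceUploader warning only notes that no explicit job jar was set. A minimal client-side sketch of the call that produces this resolution, assuming the standard TableMapReduceUtil.addDependencyJars(Job) path used by ExportSnapshot-style jobs (the job name below is illustrative, not taken from this run):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class DependencyJarSetupSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "snapshot-export-sketch"); // illustrative name
    // Walks the job's input/output format, key/value and partitioner classes
    // plus the HBase and shaded third-party dependencies, locates the jar that
    // provides each one (the "For class X, using jar Y" lines above) and adds
    // them to the job's distributed cache.
    TableMapReduceUtil.addDependencyJars(job);
  }
}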
2024-11-22T06:03:40,538 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'snaptb0-testConsecutiveExports' hfile list 2024-11-22T06:03:40,540 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(754): export split=0 size=13.3 K 2024-11-22T06:03:40,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742074_1250 (size=338) 2024-11-22T06:03:40,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742074_1250 (size=338) 2024-11-22T06:03:40,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742074_1250 (size=338) 2024-11-22T06:03:40,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742075_1251 (size=15) 2024-11-22T06:03:40,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742075_1251 (size=15) 2024-11-22T06:03:40,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742075_1251 (size=15) 2024-11-22T06:03:40,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742076_1252 (size=303777) 2024-11-22T06:03:40,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742076_1252 (size=303777) 2024-11-22T06:03:40,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742076_1252 (size=303777) 2024-11-22T06:03:43,357 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-22T06:03:43,357 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-22T06:03:43,360 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732255328437_0004_000001 (auth:SIMPLE) from 127.0.0.1:44112 2024-11-22T06:03:43,377 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1754703613/yarn-5946224054/MiniMRCluster_1754703613-localDir-nm-0_2/usercache/jenkins/appcache/application_1732255328437_0004/container_1732255328437_0004_01_000001/launch_container.sh] 2024-11-22T06:03:43,377 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1754703613/yarn-5946224054/MiniMRCluster_1754703613-localDir-nm-0_2/usercache/jenkins/appcache/application_1732255328437_0004/container_1732255328437_0004_01_000001/container_tokens] 2024-11-22T06:03:43,377 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1754703613/yarn-5946224054/MiniMRCluster_1754703613-localDir-nm-0_2/usercache/jenkins/appcache/application_1732255328437_0004/container_1732255328437_0004_01_000001/sysfs] 2024-11-22T06:03:43,705 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732255328437_0005_000001 (auth:SIMPLE) from 127.0.0.1:43738 2024-11-22T06:03:49,358 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732255328437_0005_000001 (auth:SIMPLE) from 127.0.0.1:46714 2024-11-22T06:03:49,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742077_1253 (size=349427) 2024-11-22T06:03:49,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742077_1253 (size=349427) 2024-11-22T06:03:49,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742077_1253 (size=349427) 2024-11-22T06:03:51,571 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732255328437_0005_000001 (auth:SIMPLE) from 127.0.0.1:43750 2024-11-22T06:03:54,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742078_1254 (size=16925) 2024-11-22T06:03:54,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742078_1254 (size=16925) 2024-11-22T06:03:54,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742078_1254 (size=16925) 2024-11-22T06:03:54,390 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1754703613/yarn-5946224054/MiniMRCluster_1754703613-localDir-nm-1_2/usercache/jenkins/appcache/application_1732255328437_0005/container_1732255328437_0005_01_000002/launch_container.sh] 
2024-11-22T06:03:54,390 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1754703613/yarn-5946224054/MiniMRCluster_1754703613-localDir-nm-1_2/usercache/jenkins/appcache/application_1732255328437_0005/container_1732255328437_0005_01_000002/container_tokens] 2024-11-22T06:03:54,390 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1754703613/yarn-5946224054/MiniMRCluster_1754703613-localDir-nm-1_2/usercache/jenkins/appcache/application_1732255328437_0005/container_1732255328437_0005_01_000002/sysfs] 2024-11-22T06:03:54,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742079_1255 (size=462) 2024-11-22T06:03:54,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742079_1255 (size=462) 2024-11-22T06:03:54,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742079_1255 (size=462) 2024-11-22T06:03:54,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742080_1256 (size=16925) 2024-11-22T06:03:54,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742080_1256 (size=16925) 2024-11-22T06:03:54,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742080_1256 (size=16925) 2024-11-22T06:03:54,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742081_1257 (size=349427) 2024-11-22T06:03:54,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742081_1257 (size=349427) 2024-11-22T06:03:54,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742081_1257 (size=349427) 2024-11-22T06:03:54,457 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732255328437_0005_000001 (auth:SIMPLE) from 127.0.0.1:33092 2024-11-22T06:03:55,805 INFO [Time-limited test {}] snapshot.ExportSnapshot(1210): Finalize the Snapshot Export 2024-11-22T06:03:55,805 INFO [Time-limited test {}] snapshot.ExportSnapshot(1221): Verify the exported snapshot's expiration status and integrity. 
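The ExportSnapshot entries in this stretch (hfile-list loading at line 661, split planning at 754, finalize at 1210 and verification at 1221) are the stages of a full ExportSnapshot tool run; the completion message follows immediately below. A sketch of driving the same tool programmatically, assuming it is run as a Hadoop Tool the way the test harness does (the destination path is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotRunSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Copies the snapshot's .snapshotinfo, data.manifest and referenced hfiles
    // to the destination, then verifies the exported copy before reporting
    // "Export Completed".
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testConsecutiveExports",
        "-copy-to", "file:///tmp/local-export",   // destination is illustrative
        "-overwrite"
    });
    System.exit(rc);
  }
}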
2024-11-22T06:03:55,808 INFO [Time-limited test {}] snapshot.ExportSnapshot(1227): Export Completed: snaptb0-testConsecutiveExports 2024-11-22T06:03:55,808 INFO [Time-limited test {}] snapshot.TestExportSnapshot(361): Exported snapshot 2024-11-22T06:03:55,808 INFO [Time-limited test {}] snapshot.TestExportSnapshot(372): Verified filesystem state 2024-11-22T06:03:55,808 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-2145612548_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/snaptb0-testConsecutiveExports at hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-11-22T06:03:55,810 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-11-22T06:03:55,810 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-11-22T06:03:55,810 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in org.apache.hadoop.fs.LocalFileSystem@888ac35 in root file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/local-export-1732255403540/.hbase-snapshot/snaptb0-testConsecutiveExports at file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/local-export-1732255403540/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-11-22T06:03:55,810 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/local-export-1732255403540/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-11-22T06:03:55,810 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/local-export-1732255403540/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-11-22T06:03:55,824 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testConsecutiveExports 2024-11-22T06:03:55,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] procedure2.ProcedureExecutor(1139): Stored pid=87, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testConsecutiveExports 2024-11-22T06:03:55,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=87 2024-11-22T06:03:55,828 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732255435827"}]},"ts":"1732255435827"} 2024-11-22T06:03:55,829 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=DISABLING in hbase:meta 2024-11-22T06:03:55,830 INFO [PEWorker-3 {}] 
procedure.DisableTableProcedure(284): Set testtb-testConsecutiveExports to state=DISABLING 2024-11-22T06:03:55,830 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=88, ppid=87, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testConsecutiveExports}] 2024-11-22T06:03:55,832 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=89, ppid=88, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=8d0b98c5d9fb22de88ee6daabe26e0bb, UNASSIGN}, {pid=90, ppid=88, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=6789b821e00450848828fd44c6971ecb, UNASSIGN}] 2024-11-22T06:03:55,833 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=89, ppid=88, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=8d0b98c5d9fb22de88ee6daabe26e0bb, UNASSIGN 2024-11-22T06:03:55,833 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=90, ppid=88, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=6789b821e00450848828fd44c6971ecb, UNASSIGN 2024-11-22T06:03:55,834 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=90 updating hbase:meta row=6789b821e00450848828fd44c6971ecb, regionState=CLOSING, regionLocation=407411ea95db,44619,1732255321573 2024-11-22T06:03:55,834 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=89 updating hbase:meta row=8d0b98c5d9fb22de88ee6daabe26e0bb, regionState=CLOSING, regionLocation=407411ea95db,35283,1732255321784 2024-11-22T06:03:55,836 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=90, ppid=88, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=6789b821e00450848828fd44c6971ecb, UNASSIGN because future has completed 2024-11-22T06:03:55,836 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-22T06:03:55,836 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=91, ppid=90, state=RUNNABLE, hasLock=false; CloseRegionProcedure 6789b821e00450848828fd44c6971ecb, server=407411ea95db,44619,1732255321573}] 2024-11-22T06:03:55,836 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=89, ppid=88, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=8d0b98c5d9fb22de88ee6daabe26e0bb, UNASSIGN because future has completed 2024-11-22T06:03:55,837 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-22T06:03:55,837 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=92, ppid=89, state=RUNNABLE, hasLock=false; CloseRegionProcedure 8d0b98c5d9fb22de88ee6daabe26e0bb, server=407411ea95db,35283,1732255321784}] 2024-11-22T06:03:55,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking 
to see if procedure is done pid=87 2024-11-22T06:03:55,992 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] handler.UnassignRegionHandler(122): Close 6789b821e00450848828fd44c6971ecb 2024-11-22T06:03:55,992 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-22T06:03:55,992 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] regionserver.HRegion(1722): Closing 6789b821e00450848828fd44c6971ecb, disabling compactions & flushes 2024-11-22T06:03:55,992 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,1,1732255401311.6789b821e00450848828fd44c6971ecb. 2024-11-22T06:03:55,992 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,1,1732255401311.6789b821e00450848828fd44c6971ecb. 2024-11-22T06:03:55,992 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,1,1732255401311.6789b821e00450848828fd44c6971ecb. after waiting 0 ms 2024-11-22T06:03:55,992 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,1,1732255401311.6789b821e00450848828fd44c6971ecb. 2024-11-22T06:03:55,992 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] handler.UnassignRegionHandler(122): Close 8d0b98c5d9fb22de88ee6daabe26e0bb 2024-11-22T06:03:55,992 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-22T06:03:55,992 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1722): Closing 8d0b98c5d9fb22de88ee6daabe26e0bb, disabling compactions & flushes 2024-11-22T06:03:55,993 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,,1732255401311.8d0b98c5d9fb22de88ee6daabe26e0bb. 2024-11-22T06:03:55,993 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,,1732255401311.8d0b98c5d9fb22de88ee6daabe26e0bb. 2024-11-22T06:03:55,993 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,,1732255401311.8d0b98c5d9fb22de88ee6daabe26e0bb. after waiting 0 ms 2024-11-22T06:03:55,993 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,,1732255401311.8d0b98c5d9fb22de88ee6daabe26e0bb. 
2024-11-22T06:03:56,000 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testConsecutiveExports/6789b821e00450848828fd44c6971ecb/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-22T06:03:56,000 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testConsecutiveExports/8d0b98c5d9fb22de88ee6daabe26e0bb/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-22T06:03:56,000 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-22T06:03:56,001 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-22T06:03:56,001 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,1,1732255401311.6789b821e00450848828fd44c6971ecb. 2024-11-22T06:03:56,001 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,,1732255401311.8d0b98c5d9fb22de88ee6daabe26e0bb. 2024-11-22T06:03:56,001 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] regionserver.HRegion(1676): Region close journal for 6789b821e00450848828fd44c6971ecb: Waiting for close lock at 1732255435992Running coprocessor pre-close hooks at 1732255435992Disabling compacts and flushes for region at 1732255435992Disabling writes for close at 1732255435992Writing region close event to WAL at 1732255435993 (+1 ms)Running coprocessor post-close hooks at 1732255436000 (+7 ms)Closed at 1732255436001 (+1 ms) 2024-11-22T06:03:56,001 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1676): Region close journal for 8d0b98c5d9fb22de88ee6daabe26e0bb: Waiting for close lock at 1732255435992Running coprocessor pre-close hooks at 1732255435992Disabling compacts and flushes for region at 1732255435992Disabling writes for close at 1732255435993 (+1 ms)Writing region close event to WAL at 1732255435995 (+2 ms)Running coprocessor post-close hooks at 1732255436001 (+6 ms)Closed at 1732255436001 2024-11-22T06:03:56,003 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] handler.UnassignRegionHandler(157): Closed 6789b821e00450848828fd44c6971ecb 2024-11-22T06:03:56,003 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=90 updating hbase:meta row=6789b821e00450848828fd44c6971ecb, regionState=CLOSED 2024-11-22T06:03:56,003 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] handler.UnassignRegionHandler(157): Closed 8d0b98c5d9fb22de88ee6daabe26e0bb 2024-11-22T06:03:56,005 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=89 updating hbase:meta row=8d0b98c5d9fb22de88ee6daabe26e0bb, regionState=CLOSED 2024-11-22T06:03:56,006 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] 
procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=91, ppid=90, state=RUNNABLE, hasLock=false; CloseRegionProcedure 6789b821e00450848828fd44c6971ecb, server=407411ea95db,44619,1732255321573 because future has completed 2024-11-22T06:03:56,007 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=92, ppid=89, state=RUNNABLE, hasLock=false; CloseRegionProcedure 8d0b98c5d9fb22de88ee6daabe26e0bb, server=407411ea95db,35283,1732255321784 because future has completed 2024-11-22T06:03:56,009 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=91, resume processing ppid=90 2024-11-22T06:03:56,009 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=91, ppid=90, state=SUCCESS, hasLock=false; CloseRegionProcedure 6789b821e00450848828fd44c6971ecb, server=407411ea95db,44619,1732255321573 in 171 msec 2024-11-22T06:03:56,010 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=92, resume processing ppid=89 2024-11-22T06:03:56,010 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=92, ppid=89, state=SUCCESS, hasLock=false; CloseRegionProcedure 8d0b98c5d9fb22de88ee6daabe26e0bb, server=407411ea95db,35283,1732255321784 in 171 msec 2024-11-22T06:03:56,010 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=90, ppid=88, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=6789b821e00450848828fd44c6971ecb, UNASSIGN in 177 msec 2024-11-22T06:03:56,011 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=89, resume processing ppid=88 2024-11-22T06:03:56,011 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=89, ppid=88, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=8d0b98c5d9fb22de88ee6daabe26e0bb, UNASSIGN in 178 msec 2024-11-22T06:03:56,013 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=88, resume processing ppid=87 2024-11-22T06:03:56,013 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=88, ppid=87, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testConsecutiveExports in 182 msec 2024-11-22T06:03:56,015 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732255436015"}]},"ts":"1732255436015"} 2024-11-22T06:03:56,017 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=DISABLED in hbase:meta 2024-11-22T06:03:56,017 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testConsecutiveExports to state=DISABLED 2024-11-22T06:03:56,022 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=87, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testConsecutiveExports in 194 msec 2024-11-22T06:03:56,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=87 2024-11-22T06:03:56,140 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testConsecutiveExports completed 2024-11-22T06:03:56,140 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.HMaster$5(2570): 
Client=jenkins//172.17.0.2 delete testtb-testConsecutiveExports 2024-11-22T06:03:56,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] procedure2.ProcedureExecutor(1139): Stored pid=93, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-22T06:03:56,143 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=93, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-22T06:03:56,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testConsecutiveExports 2024-11-22T06:03:56,144 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=93, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-22T06:03:56,147 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41799 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testConsecutiveExports 2024-11-22T06:03:56,149 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testConsecutiveExports/8d0b98c5d9fb22de88ee6daabe26e0bb 2024-11-22T06:03:56,149 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testConsecutiveExports/6789b821e00450848828fd44c6971ecb 2024-11-22T06:03:56,151 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testConsecutiveExports/6789b821e00450848828fd44c6971ecb/cf, FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testConsecutiveExports/6789b821e00450848828fd44c6971ecb/recovered.edits] 2024-11-22T06:03:56,151 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testConsecutiveExports/8d0b98c5d9fb22de88ee6daabe26e0bb/cf, FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testConsecutiveExports/8d0b98c5d9fb22de88ee6daabe26e0bb/recovered.edits] 2024-11-22T06:03:56,160 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testConsecutiveExports/6789b821e00450848828fd44c6971ecb/cf/031556690a7b41699be1130f8db0a966 to hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/archive/data/default/testtb-testConsecutiveExports/6789b821e00450848828fd44c6971ecb/cf/031556690a7b41699be1130f8db0a966 2024-11-22T06:03:56,161 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testConsecutiveExports/8d0b98c5d9fb22de88ee6daabe26e0bb/cf/02eab4ec2d894a3c82a094051683553d to 
hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/archive/data/default/testtb-testConsecutiveExports/8d0b98c5d9fb22de88ee6daabe26e0bb/cf/02eab4ec2d894a3c82a094051683553d 2024-11-22T06:03:56,163 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-22T06:03:56,163 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44619-0x101626c19a90001, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-22T06:03:56,163 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35283-0x101626c19a90003, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-22T06:03:56,163 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41799-0x101626c19a90002, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-22T06:03:56,164 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-11-22T06:03:56,165 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-11-22T06:03:56,165 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-11-22T06:03:56,165 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-11-22T06:03:56,166 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testConsecutiveExports/8d0b98c5d9fb22de88ee6daabe26e0bb/recovered.edits/9.seqid to hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/archive/data/default/testtb-testConsecutiveExports/8d0b98c5d9fb22de88ee6daabe26e0bb/recovered.edits/9.seqid 2024-11-22T06:03:56,167 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testConsecutiveExports/6789b821e00450848828fd44c6971ecb/recovered.edits/9.seqid to hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/archive/data/default/testtb-testConsecutiveExports/6789b821e00450848828fd44c6971ecb/recovered.edits/9.seqid 2024-11-22T06:03:56,167 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testConsecutiveExports/8d0b98c5d9fb22de88ee6daabe26e0bb 2024-11-22T06:03:56,167 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testConsecutiveExports/6789b821e00450848828fd44c6971ecb 2024-11-22T06:03:56,168 DEBUG [PEWorker-5 {}] 
procedure.DeleteTableProcedure(313): Archived testtb-testConsecutiveExports regions 2024-11-22T06:03:56,170 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=93, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-22T06:03:56,171 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41799-0x101626c19a90002, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-22T06:03:56,171 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44619-0x101626c19a90001, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-22T06:03:56,171 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-22T06:03:56,171 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44619-0x101626c19a90001, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:03:56,171 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41799-0x101626c19a90002, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:03:56,172 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35283-0x101626c19a90003, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-22T06:03:56,172 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35283-0x101626c19a90003, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:03:56,172 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:03:56,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=93 2024-11-22T06:03:56,173 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testConsecutiveExports from hbase:meta 2024-11-22T06:03:56,176 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testConsecutiveExports' descriptor. 2024-11-22T06:03:56,178 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=93, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-22T06:03:56,178 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testConsecutiveExports' from region states. 
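The teardown logged around here — the client-initiated disable (DisableTableProcedure, pid=87), the table delete (DeleteTableProcedure, pid=93) and the snapshot deletions that follow — is driven from the client side by plain Admin calls; a minimal sketch, with connection configuration assumed:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class SnapshotTeardownSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testConsecutiveExports");
      admin.disableTable(table);   // master runs DisableTableProcedure
      admin.deleteTable(table);    // master runs DeleteTableProcedure, archives regions
      admin.deleteSnapshot("emptySnaptb0-testConsecutiveExports");
      admin.deleteSnapshot("snaptb0-testConsecutiveExports");
    }
  }
}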
2024-11-22T06:03:56,178 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports,,1732255401311.8d0b98c5d9fb22de88ee6daabe26e0bb.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732255436178"}]},"ts":"9223372036854775807"} 2024-11-22T06:03:56,178 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports,1,1732255401311.6789b821e00450848828fd44c6971ecb.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732255436178"}]},"ts":"9223372036854775807"} 2024-11-22T06:03:56,181 INFO [PEWorker-5 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-22T06:03:56,181 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 8d0b98c5d9fb22de88ee6daabe26e0bb, NAME => 'testtb-testConsecutiveExports,,1732255401311.8d0b98c5d9fb22de88ee6daabe26e0bb.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 6789b821e00450848828fd44c6971ecb, NAME => 'testtb-testConsecutiveExports,1,1732255401311.6789b821e00450848828fd44c6971ecb.', STARTKEY => '1', ENDKEY => ''}] 2024-11-22T06:03:56,181 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testConsecutiveExports' as deleted. 2024-11-22T06:03:56,181 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732255436181"}]},"ts":"9223372036854775807"} 2024-11-22T06:03:56,183 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testConsecutiveExports state from META 2024-11-22T06:03:56,184 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=93, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-22T06:03:56,186 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=93, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testConsecutiveExports in 44 msec 2024-11-22T06:03:56,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=93 2024-11-22T06:03:56,280 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testConsecutiveExports 2024-11-22T06:03:56,280 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testConsecutiveExports completed 2024-11-22T06:03:56,286 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testConsecutiveExports" type: DISABLED 2024-11-22T06:03:56,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testConsecutiveExports 2024-11-22T06:03:56,289 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testConsecutiveExports" type: DISABLED 2024-11-22T06:03:56,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testConsecutiveExports 2024-11-22T06:03:56,309 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testConsecutiveExports Thread=790 (was 788) 
Potentially hanging thread: LogDeleter #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2145612548_22 at /127.0.0.1:47932 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2145612548_22 at /127.0.0.1:56976 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_260949828_1 at /127.0.0.1:56956 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) 
app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 123702) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1113730973) connection to localhost/127.0.0.1:45515 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ApplicationMasterLauncher #6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45515 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-3825 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: ApplicationMasterLauncher #7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_260949828_1 at /127.0.0.1:41634 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? 
-, OpenFileDescriptor=799 (was 801), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=614 (was 641), ProcessCount=16 (was 16), AvailableMemoryMB=4036 (was 4211) 2024-11-22T06:03:56,310 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=790 is superior to 500 2024-11-22T06:03:56,324 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithMergeRegion Thread=790, OpenFileDescriptor=799, MaxFileDescriptor=1048576, SystemLoadAverage=614, ProcessCount=16, AvailableMemoryMB=4035 2024-11-22T06:03:56,324 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=790 is superior to 500 2024-11-22T06:03:56,325 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T06:03:56,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] procedure2.ProcedureExecutor(1139): Stored pid=94, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-22T06:03:56,327 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_PRE_OPERATION 2024-11-22T06:03:56,327 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T06:03:56,327 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithMergeRegion" procId is: 94 2024-11-22T06:03:56,328 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-22T06:03:56,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=94 2024-11-22T06:03:56,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=94 2024-11-22T06:03:56,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742082_1258 (size=422) 2024-11-22T06:03:56,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742082_1258 (size=422) 2024-11-22T06:03:56,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742082_1258 (size=422) 2024-11-22T06:03:56,616 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 
22cbb4718a753f944b86f8974eee4ebe, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1732255436325.22cbb4718a753f944b86f8974eee4ebe.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788 2024-11-22T06:03:56,617 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 9c5d3a15b43735703c39879cab4ee61a, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1732255436325.9c5d3a15b43735703c39879cab4ee61a.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788 2024-11-22T06:03:56,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742083_1259 (size=83) 2024-11-22T06:03:56,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742084_1260 (size=83) 2024-11-22T06:03:56,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742084_1260 (size=83) 2024-11-22T06:03:56,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742084_1260 (size=83) 2024-11-22T06:03:56,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742083_1259 (size=83) 2024-11-22T06:03:56,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742083_1259 (size=83) 2024-11-22T06:03:56,630 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,,1732255436325.22cbb4718a753f944b86f8974eee4ebe.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T06:03:56,630 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,1,1732255436325.9c5d3a15b43735703c39879cab4ee61a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T06:03:56,630 DEBUG 
[RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1722): Closing 22cbb4718a753f944b86f8974eee4ebe, disabling compactions & flushes 2024-11-22T06:03:56,630 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1722): Closing 9c5d3a15b43735703c39879cab4ee61a, disabling compactions & flushes 2024-11-22T06:03:56,630 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,,1732255436325.22cbb4718a753f944b86f8974eee4ebe. 2024-11-22T06:03:56,630 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,,1732255436325.22cbb4718a753f944b86f8974eee4ebe. 2024-11-22T06:03:56,630 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,1,1732255436325.9c5d3a15b43735703c39879cab4ee61a. 2024-11-22T06:03:56,630 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,,1732255436325.22cbb4718a753f944b86f8974eee4ebe. after waiting 0 ms 2024-11-22T06:03:56,630 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1732255436325.9c5d3a15b43735703c39879cab4ee61a. 2024-11-22T06:03:56,630 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,,1732255436325.22cbb4718a753f944b86f8974eee4ebe. 2024-11-22T06:03:56,630 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1732255436325.9c5d3a15b43735703c39879cab4ee61a. after waiting 0 ms 2024-11-22T06:03:56,630 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,,1732255436325.22cbb4718a753f944b86f8974eee4ebe. 2024-11-22T06:03:56,630 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,1,1732255436325.9c5d3a15b43735703c39879cab4ee61a. 2024-11-22T06:03:56,630 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1676): Region close journal for 22cbb4718a753f944b86f8974eee4ebe: Waiting for close lock at 1732255436630Disabling compacts and flushes for region at 1732255436630Disabling writes for close at 1732255436630Writing region close event to WAL at 1732255436630Closed at 1732255436630 2024-11-22T06:03:56,630 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,1,1732255436325.9c5d3a15b43735703c39879cab4ee61a. 
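The create-table request logged above (table testtb-testExportFileSystemStateWithMergeRegion with a single 'cf' family, VERSIONS => '1', REGION_REPLICATION => '1', and two regions split at row key '1') corresponds to an ordinary synchronous Admin createTable call. The block below is an illustrative sketch only, not part of the test log, written against the standard HBase Java client API; the configuration source, class name, and main method are assumptions added for the example.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMergeRegionTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Single column family 'cf' keeping one version, matching the descriptor in the log.
      ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes("cf"))
          .setMaxVersions(1)
          .build();
      TableDescriptor table = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"))
          .setRegionReplication(1)
          .setColumnFamily(cf)
          .build();
      // One split key '1' yields the two regions ('' -> '1' and '1' -> '') created in the log.
      admin.createTable(table, new byte[][] { Bytes.toBytes("1") });
    }
  }
}

The synchronous createTable call returns only once the table is online, which is why the log then walks the CreateTableProcedure states (PRE_OPERATION, WRITE_FS_LAYOUT, ADD_TO_META, ASSIGN_REGIONS) before the client's "procedure is done" polls succeed.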
2024-11-22T06:03:56,630 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1676): Region close journal for 9c5d3a15b43735703c39879cab4ee61a: Waiting for close lock at 1732255436630Disabling compacts and flushes for region at 1732255436630Disabling writes for close at 1732255436630Writing region close event to WAL at 1732255436630Closed at 1732255436630 2024-11-22T06:03:56,631 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_ADD_TO_META 2024-11-22T06:03:56,631 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion,,1732255436325.22cbb4718a753f944b86f8974eee4ebe.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1732255436631"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732255436631"}]},"ts":"1732255436631"} 2024-11-22T06:03:56,631 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion,1,1732255436325.9c5d3a15b43735703c39879cab4ee61a.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1732255436631"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732255436631"}]},"ts":"1732255436631"} 2024-11-22T06:03:56,633 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-22T06:03:56,634 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-22T06:03:56,634 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732255436634"}]},"ts":"1732255436634"} 2024-11-22T06:03:56,636 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=ENABLING in hbase:meta 2024-11-22T06:03:56,636 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {407411ea95db=0} racks are {/default-rack=0} 2024-11-22T06:03:56,637 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-22T06:03:56,637 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-22T06:03:56,637 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-22T06:03:56,637 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-22T06:03:56,638 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-22T06:03:56,638 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-22T06:03:56,638 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-22T06:03:56,638 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-22T06:03:56,638 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-22T06:03:56,638 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-22T06:03:56,638 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized 
subprocedures=[{pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=22cbb4718a753f944b86f8974eee4ebe, ASSIGN}, {pid=96, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=9c5d3a15b43735703c39879cab4ee61a, ASSIGN}] 2024-11-22T06:03:56,639 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=96, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=9c5d3a15b43735703c39879cab4ee61a, ASSIGN 2024-11-22T06:03:56,639 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=22cbb4718a753f944b86f8974eee4ebe, ASSIGN 2024-11-22T06:03:56,639 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=96, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=9c5d3a15b43735703c39879cab4ee61a, ASSIGN; state=OFFLINE, location=407411ea95db,35283,1732255321784; forceNewPlan=false, retain=false 2024-11-22T06:03:56,639 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=22cbb4718a753f944b86f8974eee4ebe, ASSIGN; state=OFFLINE, location=407411ea95db,44619,1732255321573; forceNewPlan=false, retain=false 2024-11-22T06:03:56,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=94 2024-11-22T06:03:56,790 INFO [407411ea95db:40429 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-11-22T06:03:56,790 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=95 updating hbase:meta row=22cbb4718a753f944b86f8974eee4ebe, regionState=OPENING, regionLocation=407411ea95db,44619,1732255321573 2024-11-22T06:03:56,790 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=96 updating hbase:meta row=9c5d3a15b43735703c39879cab4ee61a, regionState=OPENING, regionLocation=407411ea95db,35283,1732255321784 2024-11-22T06:03:56,792 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=22cbb4718a753f944b86f8974eee4ebe, ASSIGN because future has completed 2024-11-22T06:03:56,792 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=97, ppid=95, state=RUNNABLE, hasLock=false; OpenRegionProcedure 22cbb4718a753f944b86f8974eee4ebe, server=407411ea95db,44619,1732255321573}] 2024-11-22T06:03:56,793 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=96, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=9c5d3a15b43735703c39879cab4ee61a, ASSIGN because future has completed 2024-11-22T06:03:56,793 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=98, ppid=96, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9c5d3a15b43735703c39879cab4ee61a, server=407411ea95db,35283,1732255321784}] 2024-11-22T06:03:56,947 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion,1,1732255436325.9c5d3a15b43735703c39879cab4ee61a. 2024-11-22T06:03:56,947 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion,,1732255436325.22cbb4718a753f944b86f8974eee4ebe. 2024-11-22T06:03:56,947 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(7752): Opening region: {ENCODED => 9c5d3a15b43735703c39879cab4ee61a, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1732255436325.9c5d3a15b43735703c39879cab4ee61a.', STARTKEY => '1', ENDKEY => ''} 2024-11-22T06:03:56,947 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(7752): Opening region: {ENCODED => 22cbb4718a753f944b86f8974eee4ebe, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1732255436325.22cbb4718a753f944b86f8974eee4ebe.', STARTKEY => '', ENDKEY => '1'} 2024-11-22T06:03:56,948 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion,1,1732255436325.9c5d3a15b43735703c39879cab4ee61a. service=AccessControlService 2024-11-22T06:03:56,948 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion,,1732255436325.22cbb4718a753f944b86f8974eee4ebe. 
service=AccessControlService 2024-11-22T06:03:56,948 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-22T06:03:56,948 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-22T06:03:56,948 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion 9c5d3a15b43735703c39879cab4ee61a 2024-11-22T06:03:56,948 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion 22cbb4718a753f944b86f8974eee4ebe 2024-11-22T06:03:56,948 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,1,1732255436325.9c5d3a15b43735703c39879cab4ee61a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T06:03:56,948 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,,1732255436325.22cbb4718a753f944b86f8974eee4ebe.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T06:03:56,948 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(7794): checking encryption for 9c5d3a15b43735703c39879cab4ee61a 2024-11-22T06:03:56,948 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(7794): checking encryption for 22cbb4718a753f944b86f8974eee4ebe 2024-11-22T06:03:56,948 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(7797): checking classloading for 9c5d3a15b43735703c39879cab4ee61a 2024-11-22T06:03:56,948 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(7797): checking classloading for 22cbb4718a753f944b86f8974eee4ebe 2024-11-22T06:03:56,949 INFO [StoreOpener-9c5d3a15b43735703c39879cab4ee61a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 9c5d3a15b43735703c39879cab4ee61a 2024-11-22T06:03:56,950 INFO [StoreOpener-22cbb4718a753f944b86f8974eee4ebe-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 22cbb4718a753f944b86f8974eee4ebe 2024-11-22T06:03:56,951 INFO [StoreOpener-9c5d3a15b43735703c39879cab4ee61a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9c5d3a15b43735703c39879cab4ee61a columnFamilyName cf 2024-11-22T06:03:56,951 INFO [StoreOpener-22cbb4718a753f944b86f8974eee4ebe-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 22cbb4718a753f944b86f8974eee4ebe columnFamilyName cf 2024-11-22T06:03:56,951 DEBUG [StoreOpener-22cbb4718a753f944b86f8974eee4ebe-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T06:03:56,951 DEBUG [StoreOpener-9c5d3a15b43735703c39879cab4ee61a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T06:03:56,952 INFO [StoreOpener-9c5d3a15b43735703c39879cab4ee61a-1 {}] regionserver.HStore(327): Store=9c5d3a15b43735703c39879cab4ee61a/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T06:03:56,952 INFO [StoreOpener-22cbb4718a753f944b86f8974eee4ebe-1 {}] regionserver.HStore(327): Store=22cbb4718a753f944b86f8974eee4ebe/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T06:03:56,952 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1038): replaying wal for 9c5d3a15b43735703c39879cab4ee61a 2024-11-22T06:03:56,952 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(1038): replaying wal for 22cbb4718a753f944b86f8974eee4ebe 2024-11-22T06:03:56,952 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion/9c5d3a15b43735703c39879cab4ee61a 2024-11-22T06:03:56,953 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion/22cbb4718a753f944b86f8974eee4ebe 2024-11-22T06:03:56,953 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion/9c5d3a15b43735703c39879cab4ee61a 2024-11-22T06:03:56,953 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion/22cbb4718a753f944b86f8974eee4ebe 2024-11-22T06:03:56,953 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1048): stopping wal replay for 9c5d3a15b43735703c39879cab4ee61a 2024-11-22T06:03:56,953 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1060): Cleaning up temporary data for 9c5d3a15b43735703c39879cab4ee61a 2024-11-22T06:03:56,953 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(1048): stopping wal replay for 22cbb4718a753f944b86f8974eee4ebe 2024-11-22T06:03:56,953 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(1060): Cleaning up temporary data for 22cbb4718a753f944b86f8974eee4ebe 2024-11-22T06:03:56,954 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1093): writing seq id for 9c5d3a15b43735703c39879cab4ee61a 2024-11-22T06:03:56,955 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(1093): writing seq id for 22cbb4718a753f944b86f8974eee4ebe 2024-11-22T06:03:56,956 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion/9c5d3a15b43735703c39879cab4ee61a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T06:03:56,957 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1114): Opened 9c5d3a15b43735703c39879cab4ee61a; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67318629, jitterRate=0.0031257420778274536}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-22T06:03:56,957 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 9c5d3a15b43735703c39879cab4ee61a 2024-11-22T06:03:56,957 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion/22cbb4718a753f944b86f8974eee4ebe/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T06:03:56,958 INFO 
[RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(1114): Opened 22cbb4718a753f944b86f8974eee4ebe; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66433830, jitterRate=-0.010058790445327759}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-22T06:03:56,958 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 22cbb4718a753f944b86f8974eee4ebe 2024-11-22T06:03:56,958 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(1006): Region open journal for 22cbb4718a753f944b86f8974eee4ebe: Running coprocessor pre-open hook at 1732255436948Writing region info on filesystem at 1732255436948Initializing all the Stores at 1732255436949 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732255436949Cleaning up temporary data from old regions at 1732255436953 (+4 ms)Running coprocessor post-open hooks at 1732255436958 (+5 ms)Region opened successfully at 1732255436958 2024-11-22T06:03:56,958 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1006): Region open journal for 9c5d3a15b43735703c39879cab4ee61a: Running coprocessor pre-open hook at 1732255436948Writing region info on filesystem at 1732255436948Initializing all the Stores at 1732255436949 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732255436949Cleaning up temporary data from old regions at 1732255436953 (+4 ms)Running coprocessor post-open hooks at 1732255436957 (+4 ms)Region opened successfully at 1732255436958 (+1 ms) 2024-11-22T06:03:56,959 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion,,1732255436325.22cbb4718a753f944b86f8974eee4ebe., pid=97, masterSystemTime=1732255436944 2024-11-22T06:03:56,959 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion,1,1732255436325.9c5d3a15b43735703c39879cab4ee61a., pid=98, masterSystemTime=1732255436944 2024-11-22T06:03:56,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=94 2024-11-22T06:03:56,961 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion,,1732255436325.22cbb4718a753f944b86f8974eee4ebe. 
2024-11-22T06:03:56,961 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion,,1732255436325.22cbb4718a753f944b86f8974eee4ebe. 2024-11-22T06:03:56,962 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=95 updating hbase:meta row=22cbb4718a753f944b86f8974eee4ebe, regionState=OPEN, openSeqNum=2, regionLocation=407411ea95db,44619,1732255321573 2024-11-22T06:03:56,962 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion,1,1732255436325.9c5d3a15b43735703c39879cab4ee61a. 2024-11-22T06:03:56,962 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion,1,1732255436325.9c5d3a15b43735703c39879cab4ee61a. 2024-11-22T06:03:56,962 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=96 updating hbase:meta row=9c5d3a15b43735703c39879cab4ee61a, regionState=OPEN, openSeqNum=2, regionLocation=407411ea95db,35283,1732255321784 2024-11-22T06:03:56,964 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=97, ppid=95, state=RUNNABLE, hasLock=false; OpenRegionProcedure 22cbb4718a753f944b86f8974eee4ebe, server=407411ea95db,44619,1732255321573 because future has completed 2024-11-22T06:03:56,964 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=98, ppid=96, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9c5d3a15b43735703c39879cab4ee61a, server=407411ea95db,35283,1732255321784 because future has completed 2024-11-22T06:03:56,966 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=97, resume processing ppid=95 2024-11-22T06:03:56,967 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=97, ppid=95, state=SUCCESS, hasLock=false; OpenRegionProcedure 22cbb4718a753f944b86f8974eee4ebe, server=407411ea95db,44619,1732255321573 in 172 msec 2024-11-22T06:03:56,967 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=98, resume processing ppid=96 2024-11-22T06:03:56,967 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=98, ppid=96, state=SUCCESS, hasLock=false; OpenRegionProcedure 9c5d3a15b43735703c39879cab4ee61a, server=407411ea95db,35283,1732255321784 in 172 msec 2024-11-22T06:03:56,967 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=95, ppid=94, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=22cbb4718a753f944b86f8974eee4ebe, ASSIGN in 328 msec 2024-11-22T06:03:56,969 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=96, resume processing ppid=94 2024-11-22T06:03:56,969 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=96, ppid=94, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=9c5d3a15b43735703c39879cab4ee61a, ASSIGN in 329 msec 2024-11-22T06:03:56,969 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 
execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-22T06:03:56,970 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732255436969"}]},"ts":"1732255436969"} 2024-11-22T06:03:56,971 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=ENABLED in hbase:meta 2024-11-22T06:03:56,972 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_POST_OPERATION 2024-11-22T06:03:56,972 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithMergeRegion jenkins: RWXCA 2024-11-22T06:03:56,975 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41799 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-11-22T06:03:57,002 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35283-0x101626c19a90003, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:03:57,002 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44619-0x101626c19a90001, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:03:57,002 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:03:57,002 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41799-0x101626c19a90002, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:03:57,013 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-22T06:03:57,014 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-22T06:03:57,014 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-22T06:03:57,014 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-22T06:03:57,016 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=94, 
state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 689 msec 2024-11-22T06:03:57,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=94 2024-11-22T06:03:57,470 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-11-22T06:03:57,470 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportFileSystemStateWithMergeRegion get assigned. Timeout = 60000ms 2024-11-22T06:03:57,470 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-22T06:03:57,475 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportFileSystemStateWithMergeRegion assigned to meta. Checking AM states. 2024-11-22T06:03:57,475 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-22T06:03:57,475 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportFileSystemStateWithMergeRegion assigned. 2024-11-22T06:03:57,476 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-22T06:03:57,479 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-11-22T06:03:57,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732255437479 (current time:1732255437479). 
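The snapshot request logged just above ({ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }) is the kind of call a client issues through Admin.snapshot. The block below is an illustrative sketch only, not part of the log, assuming the standard HBase Java client; the snapshot and table names are copied from the log, everything else (configuration, class, main) is assumed.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeSnapshot {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Blocks until the snapshot completes; on the master side this produces a
      // "snapshot request for:{ ss=... type=FLUSH ttl=0 }" line like the one above.
      admin.snapshot("emptySnaptb0-testExportFileSystemStateWithMergeRegion",
          TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"),
          SnapshotType.FLUSH);
    }
  }
}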
2024-11-22T06:03:57,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-22T06:03:57,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion VERSION not specified, setting to 2 2024-11-22T06:03:57,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-22T06:03:57,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@526fda2a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:03:57,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.ClusterIdFetcher(90): Going to request 407411ea95db,40429,-1 for getting cluster id 2024-11-22T06:03:57,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T06:03:57,482 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '6eeb2921-6f20-4de7-bcc0-81ebecd4007b' 2024-11-22T06:03:57,482 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T06:03:57,482 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "6eeb2921-6f20-4de7-bcc0-81ebecd4007b" 2024-11-22T06:03:57,482 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1d336d1a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:03:57,482 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [407411ea95db,40429,-1] 2024-11-22T06:03:57,483 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T06:03:57,483 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:03:57,484 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59728, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T06:03:57,485 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@30ee5846, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:03:57,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T06:03:57,486 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=407411ea95db,41799,1732255321718, seqNum=-1] 2024-11-22T06:03:57,486 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T06:03:57,487 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52898, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T06:03:57,489 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429. 2024-11-22T06:03:57,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-22T06:03:57,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:03:57,489 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-22T06:03:57,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:03:57,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6ed160ef, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:03:57,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.ClusterIdFetcher(90): Going to request 407411ea95db,40429,-1 for getting cluster id 2024-11-22T06:03:57,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T06:03:57,491 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '6eeb2921-6f20-4de7-bcc0-81ebecd4007b' 2024-11-22T06:03:57,491 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T06:03:57,491 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "6eeb2921-6f20-4de7-bcc0-81ebecd4007b" 2024-11-22T06:03:57,492 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7795920, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:03:57,492 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [407411ea95db,40429,-1] 2024-11-22T06:03:57,492 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T06:03:57,492 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:03:57,493 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59742, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T06:03:57,493 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1bbea778, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:03:57,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T06:03:57,495 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=407411ea95db,41799,1732255321718, seqNum=-1] 2024-11-22T06:03:57,495 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T06:03:57,496 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] 
ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52904, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T06:03:57,498 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion', locateType=CURRENT is [region=hbase:acl,,1732255324607.c2a1af531af5dfa747044be7409c1d27., hostname=407411ea95db,41799,1732255321718, seqNum=2] 2024-11-22T06:03:57,499 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429. 2024-11-22T06:03:57,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor220.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-22T06:03:57,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:03:57,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 
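The call stack above ends in PermissionStorage.getTablePermissions, which reads the table's acl entry from hbase:acl before the acl is written into the snapshot description (the entry itself, "jenkins: RWXCA", appears a few lines below). A grant like that is normally issued through AccessControlClient; the following is only a sketch of that call, assuming the AccessController coprocessor is enabled as it is in this test setup:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.Permission;

    public class GrantTablePermissions {
      // AccessControlClient methods declare "throws Throwable"
      public static void main(String[] args) throws Throwable {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
        try (Connection conn = ConnectionFactory.createConnection(conf)) {
          // Family and qualifier left null -> table-scope grant, which is what
          // an "RWXCA" entry for a table in hbase:acl corresponds to.
          AccessControlClient.grant(conn, table, "jenkins", null, null,
              Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
              Permission.Action.CREATE, Permission.Action.ADMIN);
        }
      }
    }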
2024-11-22T06:03:57,500 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-22T06:03:57,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-11-22T06:03:57,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-22T06:03:57,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] procedure2.ProcedureExecutor(1139): Stored pid=99, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-11-22T06:03:57,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 99 2024-11-22T06:03:57,502 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-22T06:03:57,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=99 2024-11-22T06:03:57,503 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-22T06:03:57,505 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-22T06:03:57,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742085_1261 (size=215) 2024-11-22T06:03:57,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742085_1261 (size=215) 2024-11-22T06:03:57,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742085_1261 (size=215) 2024-11-22T06:03:57,511 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-22T06:03:57,511 INFO [PEWorker-1 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=100, ppid=99, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 22cbb4718a753f944b86f8974eee4ebe}, {pid=101, ppid=99, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 9c5d3a15b43735703c39879cab4ee61a}] 2024-11-22T06:03:57,512 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=100, ppid=99, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 22cbb4718a753f944b86f8974eee4ebe 2024-11-22T06:03:57,512 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=101, ppid=99, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 9c5d3a15b43735703c39879cab4ee61a 2024-11-22T06:03:57,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=99 2024-11-22T06:03:57,665 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44619 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=100 2024-11-22T06:03:57,665 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35283 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=101 2024-11-22T06:03:57,665 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=100}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1732255436325.22cbb4718a753f944b86f8974eee4ebe. 2024-11-22T06:03:57,665 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=101}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1732255436325.9c5d3a15b43735703c39879cab4ee61a. 2024-11-22T06:03:57,666 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=100}] regionserver.HRegion(2603): Flush status journal for 22cbb4718a753f944b86f8974eee4ebe: 2024-11-22T06:03:57,666 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=100}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,,1732255436325.22cbb4718a753f944b86f8974eee4ebe. for emptySnaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-11-22T06:03:57,666 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=101}] regionserver.HRegion(2603): Flush status journal for 9c5d3a15b43735703c39879cab4ee61a: 2024-11-22T06:03:57,666 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=101}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,1,1732255436325.9c5d3a15b43735703c39879cab4ee61a. for emptySnaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-11-22T06:03:57,666 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=100}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,,1732255436325.22cbb4718a753f944b86f8974eee4ebe.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-22T06:03:57,666 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=100}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-22T06:03:57,666 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=100}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-22T06:03:57,666 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=101}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,1,1732255436325.9c5d3a15b43735703c39879cab4ee61a.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-22T06:03:57,666 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=101}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-22T06:03:57,667 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=101}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-22T06:03:57,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742086_1262 (size=86) 2024-11-22T06:03:57,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742086_1262 (size=86) 2024-11-22T06:03:57,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742086_1262 (size=86) 2024-11-22T06:03:57,680 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=100}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1732255436325.22cbb4718a753f944b86f8974eee4ebe. 
2024-11-22T06:03:57,681 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=100}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=100 2024-11-22T06:03:57,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.HMaster(4169): Remote procedure done, pid=100 2024-11-22T06:03:57,681 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion on region 22cbb4718a753f944b86f8974eee4ebe 2024-11-22T06:03:57,681 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=100, ppid=99, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 22cbb4718a753f944b86f8974eee4ebe 2024-11-22T06:03:57,683 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=100, ppid=99, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 22cbb4718a753f944b86f8974eee4ebe in 171 msec 2024-11-22T06:03:57,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742087_1263 (size=86) 2024-11-22T06:03:57,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742087_1263 (size=86) 2024-11-22T06:03:57,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742087_1263 (size=86) 2024-11-22T06:03:57,684 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=101}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1732255436325.9c5d3a15b43735703c39879cab4ee61a. 
2024-11-22T06:03:57,684 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=101 2024-11-22T06:03:57,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.HMaster(4169): Remote procedure done, pid=101 2024-11-22T06:03:57,685 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion on region 9c5d3a15b43735703c39879cab4ee61a 2024-11-22T06:03:57,685 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=101, ppid=99, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 9c5d3a15b43735703c39879cab4ee61a 2024-11-22T06:03:57,687 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=101, resume processing ppid=99 2024-11-22T06:03:57,687 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=101, ppid=99, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 9c5d3a15b43735703c39879cab4ee61a in 175 msec 2024-11-22T06:03:57,688 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-22T06:03:57,688 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-22T06:03:57,689 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-22T06:03:57,689 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-22T06:03:57,690 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-22T06:03:57,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742088_1264 (size=597) 2024-11-22T06:03:57,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742088_1264 (size=597) 2024-11-22T06:03:57,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742088_1264 (size=597) 2024-11-22T06:03:57,701 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ 
ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-22T06:03:57,706 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-22T06:03:57,707 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithMergeRegion to hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-22T06:03:57,709 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-22T06:03:57,709 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 99 2024-11-22T06:03:57,711 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=99, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } in 208 msec 2024-11-22T06:03:57,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=99 2024-11-22T06:03:57,821 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-11-22T06:03:57,829 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion', row='0004f2a354988cf1e121c5fbec2b2a5ec', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion,,1732255436325.22cbb4718a753f944b86f8974eee4ebe., hostname=407411ea95db,44619,1732255321573, seqNum=2] 2024-11-22T06:03:57,830 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion', row='1472ba0ab48092bd559f7f219d0664c5c', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion,1,1732255436325.9c5d3a15b43735703c39879cab4ee61a., hostname=407411ea95db,35283,1732255321784, seqNum=2] 2024-11-22T06:03:57,832 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion', row='2bcc451dbc0115631c76a52593a2cb647', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion,1,1732255436325.9c5d3a15b43735703c39879cab4ee61a., 
hostname=407411ea95db,35283,1732255321784, seqNum=2] 2024-11-22T06:03:57,833 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion', row='362991bd57d4bf41404e99eea81674e7b', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion,1,1732255436325.9c5d3a15b43735703c39879cab4ee61a., hostname=407411ea95db,35283,1732255321784, seqNum=2] 2024-11-22T06:03:57,833 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion', row='4ca64ed36a778a147c0a125dab0053215', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion,1,1732255436325.9c5d3a15b43735703c39879cab4ee61a., hostname=407411ea95db,35283,1732255321784, seqNum=2] 2024-11-22T06:03:57,835 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44619 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithMergeRegion,,1732255436325.22cbb4718a753f944b86f8974eee4ebe. with WAL disabled. Data may be lost in the event of a crash. 2024-11-22T06:03:57,837 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35283 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithMergeRegion,1,1732255436325.9c5d3a15b43735703c39879cab4ee61a. with WAL disabled. Data may be lost in the event of a crash. 2024-11-22T06:03:57,839 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-22T06:03:57,841 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithMergeRegion 2024-11-22T06:03:57,841 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithMergeRegion,,1732255436325.22cbb4718a753f944b86f8974eee4ebe. 
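The two "writing data to region ... with WAL disabled" warnings above are emitted when client mutations arrive with SKIP_WAL durability, which the test uses to load rows quickly at the cost of crash safety. A minimal sketch of such a put; the row key and value are hypothetical, while family cf and qualifier q are the ones visible later in the flush output:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SkipWalPut {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        TableName tableName = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(tableName)) {
          Put put = new Put(Bytes.toBytes("row-0001"))  // hypothetical row key
              .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
          // Skipping the WAL is what triggers the region server warning seen above.
          put.setDurability(Durability.SKIP_WAL);
          table.put(put);
        }
      }
    }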
2024-11-22T06:03:57,841 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-22T06:03:57,843 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-22T06:03:57,847 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-22T06:03:57,851 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-22T06:03:57,853 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-11-22T06:03:57,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732255437853 (current time:1732255437853). 2024-11-22T06:03:57,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-22T06:03:57,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithMergeRegion VERSION not specified, setting to 2 2024-11-22T06:03:57,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-22T06:03:57,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@484ac8c9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:03:57,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.ClusterIdFetcher(90): Going to request 407411ea95db,40429,-1 for getting cluster id 2024-11-22T06:03:57,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T06:03:57,855 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '6eeb2921-6f20-4de7-bcc0-81ebecd4007b' 2024-11-22T06:03:57,855 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T06:03:57,855 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "6eeb2921-6f20-4de7-bcc0-81ebecd4007b" 2024-11-22T06:03:57,855 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6d6ef0e6, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:03:57,855 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [407411ea95db,40429,-1] 2024-11-22T06:03:57,855 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T06:03:57,856 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:03:57,856 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59758, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T06:03:57,857 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1954fd7b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:03:57,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T06:03:57,858 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=407411ea95db,41799,1732255321718, seqNum=-1] 2024-11-22T06:03:57,858 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T06:03:57,859 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52908, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T06:03:57,860 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429. 
2024-11-22T06:03:57,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-22T06:03:57,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:03:57,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:03:57,861 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-22T06:03:57,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@672b3dc7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:03:57,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.ClusterIdFetcher(90): Going to request 407411ea95db,40429,-1 for getting cluster id 2024-11-22T06:03:57,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T06:03:57,862 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '6eeb2921-6f20-4de7-bcc0-81ebecd4007b' 2024-11-22T06:03:57,863 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T06:03:57,863 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "6eeb2921-6f20-4de7-bcc0-81ebecd4007b" 2024-11-22T06:03:57,863 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5a9af916, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:03:57,863 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [407411ea95db,40429,-1] 2024-11-22T06:03:57,863 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T06:03:57,863 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:03:57,864 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59770, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T06:03:57,864 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@53bfcc5d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:03:57,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T06:03:57,866 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=407411ea95db,41799,1732255321718, seqNum=-1] 2024-11-22T06:03:57,866 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T06:03:57,867 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52914, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T06:03:57,869 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion', locateType=CURRENT is [region=hbase:acl,,1732255324607.c2a1af531af5dfa747044be7409c1d27., hostname=407411ea95db,41799,1732255321718, seqNum=2] 2024-11-22T06:03:57,870 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429. 
2024-11-22T06:03:57,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor220.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-22T06:03:57,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:03:57,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:03:57,871 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-22T06:03:57,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-11-22T06:03:57,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
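At this point the master has validated the request for snaptb0-testExportFileSystemStateWithMergeRegion (type=FLUSH, ttl=0), re-read the table acl, and is about to register the SnapshotProcedure. On the client side the whole exchange is a single blocking Admin.snapshot call; a minimal sketch, assuming default configuration on the classpath:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class TakeFlushSnapshot {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Blocks until the master-side SnapshotProcedure finishes.
          admin.snapshot("snaptb0-testExportFileSystemStateWithMergeRegion",
              TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"),
              SnapshotType.FLUSH);
        }
      }
    }

The repeated "Checking to see if procedure is done pid=..." entries in the log are this client polling until the procedure reaches SUCCESS.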
2024-11-22T06:03:57,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] procedure2.ProcedureExecutor(1139): Stored pid=102, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-11-22T06:03:57,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 102 2024-11-22T06:03:57,874 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-22T06:03:57,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=102 2024-11-22T06:03:57,875 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-22T06:03:57,879 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-22T06:03:57,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742089_1265 (size=210) 2024-11-22T06:03:57,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742089_1265 (size=210) 2024-11-22T06:03:57,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742089_1265 (size=210) 2024-11-22T06:03:57,888 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-22T06:03:57,888 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=103, ppid=102, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 22cbb4718a753f944b86f8974eee4ebe}, {pid=104, ppid=102, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 9c5d3a15b43735703c39879cab4ee61a}] 2024-11-22T06:03:57,889 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=104, ppid=102, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 9c5d3a15b43735703c39879cab4ee61a 2024-11-22T06:03:57,889 INFO [PEWorker-1 {}] 
procedure.MasterProcedureScheduler(851): Took xlock for pid=103, ppid=102, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 22cbb4718a753f944b86f8974eee4ebe 2024-11-22T06:03:57,952 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion' 2024-11-22T06:03:57,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=102 2024-11-22T06:03:58,041 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44619 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=103 2024-11-22T06:03:58,041 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35283 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=104 2024-11-22T06:03:58,041 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1732255436325.9c5d3a15b43735703c39879cab4ee61a. 2024-11-22T06:03:58,041 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1732255436325.22cbb4718a753f944b86f8974eee4ebe. 2024-11-22T06:03:58,042 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.HRegion(2902): Flushing 22cbb4718a753f944b86f8974eee4ebe 1/1 column families, dataSize=65 B heapSize=400 B 2024-11-22T06:03:58,042 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.HRegion(2902): Flushing 9c5d3a15b43735703c39879cab4ee61a 1/1 column families, dataSize=3.19 KB heapSize=7.14 KB 2024-11-22T06:03:58,064 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion/9c5d3a15b43735703c39879cab4ee61a/.tmp/cf/022d71d7754f4b2cb4336a81a04f7560 is 71, key is 10f09fed3ebcbd328de1bc8aff5c4014/cf:q/1732255437837/Put/seqid=0 2024-11-22T06:03:58,067 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion/22cbb4718a753f944b86f8974eee4ebe/.tmp/cf/48bae1a3c05642bcb4bd7029582c9122 is 69, key is 0004f2a354988cf1e121c5fbec2b2a5ec/cf:q/1732255437835/Put/seqid=0 2024-11-22T06:03:58,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742090_1266 (size=8460) 2024-11-22T06:03:58,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742090_1266 (size=8460) 2024-11-22T06:03:58,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742090_1266 
(size=8460) 2024-11-22T06:03:58,078 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.19 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion/9c5d3a15b43735703c39879cab4ee61a/.tmp/cf/022d71d7754f4b2cb4336a81a04f7560 2024-11-22T06:03:58,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742091_1267 (size=5149) 2024-11-22T06:03:58,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742091_1267 (size=5149) 2024-11-22T06:03:58,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742091_1267 (size=5149) 2024-11-22T06:03:58,082 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion/22cbb4718a753f944b86f8974eee4ebe/.tmp/cf/48bae1a3c05642bcb4bd7029582c9122 2024-11-22T06:03:58,086 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion/9c5d3a15b43735703c39879cab4ee61a/.tmp/cf/022d71d7754f4b2cb4336a81a04f7560 as hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion/9c5d3a15b43735703c39879cab4ee61a/cf/022d71d7754f4b2cb4336a81a04f7560 2024-11-22T06:03:58,089 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion/22cbb4718a753f944b86f8974eee4ebe/.tmp/cf/48bae1a3c05642bcb4bd7029582c9122 as hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion/22cbb4718a753f944b86f8974eee4ebe/cf/48bae1a3c05642bcb4bd7029582c9122 2024-11-22T06:03:58,093 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion/9c5d3a15b43735703c39879cab4ee61a/cf/022d71d7754f4b2cb4336a81a04f7560, entries=49, sequenceid=6, filesize=8.3 K 2024-11-22T06:03:58,094 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.HRegion(3140): Finished flush of dataSize ~3.19 KB/3271, heapSize ~7.13 KB/7296, currentSize=0 B/0 for 9c5d3a15b43735703c39879cab4ee61a in 53ms, sequenceid=6, compaction requested=false 2024-11-22T06:03:58,095 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] 
regionserver.HRegion(2603): Flush status journal for 9c5d3a15b43735703c39879cab4ee61a: 2024-11-22T06:03:58,095 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,1,1732255436325.9c5d3a15b43735703c39879cab4ee61a. for snaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-11-22T06:03:58,095 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,1,1732255436325.9c5d3a15b43735703c39879cab4ee61a.' region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-22T06:03:58,095 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-22T06:03:58,095 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion/9c5d3a15b43735703c39879cab4ee61a/cf/022d71d7754f4b2cb4336a81a04f7560] hfiles 2024-11-22T06:03:58,095 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion/9c5d3a15b43735703c39879cab4ee61a/cf/022d71d7754f4b2cb4336a81a04f7560 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-22T06:03:58,095 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion/22cbb4718a753f944b86f8974eee4ebe/cf/48bae1a3c05642bcb4bd7029582c9122, entries=1, sequenceid=6, filesize=5.0 K 2024-11-22T06:03:58,096 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.HRegion(3140): Finished flush of dataSize ~65 B/65, heapSize ~384 B/384, currentSize=0 B/0 for 22cbb4718a753f944b86f8974eee4ebe in 55ms, sequenceid=6, compaction requested=false 2024-11-22T06:03:58,096 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.HRegion(2603): Flush status journal for 22cbb4718a753f944b86f8974eee4ebe: 2024-11-22T06:03:58,096 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,,1732255436325.22cbb4718a753f944b86f8974eee4ebe. for snaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-11-22T06:03:58,096 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,,1732255436325.22cbb4718a753f944b86f8974eee4ebe.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-22T06:03:58,096 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-22T06:03:58,097 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion/22cbb4718a753f944b86f8974eee4ebe/cf/48bae1a3c05642bcb4bd7029582c9122] hfiles 2024-11-22T06:03:58,097 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion/22cbb4718a753f944b86f8974eee4ebe/cf/48bae1a3c05642bcb4bd7029582c9122 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-22T06:03:58,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742092_1268 (size=125) 2024-11-22T06:03:58,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742092_1268 (size=125) 2024-11-22T06:03:58,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742092_1268 (size=125) 2024-11-22T06:03:58,106 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1732255436325.9c5d3a15b43735703c39879cab4ee61a. 
2024-11-22T06:03:58,106 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=104 2024-11-22T06:03:58,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.HMaster(4169): Remote procedure done, pid=104 2024-11-22T06:03:58,106 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion on region 9c5d3a15b43735703c39879cab4ee61a 2024-11-22T06:03:58,106 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=104, ppid=102, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 9c5d3a15b43735703c39879cab4ee61a 2024-11-22T06:03:58,108 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=104, ppid=102, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 9c5d3a15b43735703c39879cab4ee61a in 219 msec 2024-11-22T06:03:58,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742093_1269 (size=125) 2024-11-22T06:03:58,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742093_1269 (size=125) 2024-11-22T06:03:58,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742093_1269 (size=125) 2024-11-22T06:03:58,113 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1732255436325.22cbb4718a753f944b86f8974eee4ebe. 
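(Editor's illustrative sketch, not part of the captured log.) The entries above show the per-region snapshot callables (pid=103 and pid=104) for snaptb0-testExportFileSystemStateWithMergeRegion finishing their flush and manifest work, and the entries below show the master consolidating and completing SnapshotProcedure pid=102. A minimal client-side call that would trigger such a FLUSH snapshot, assuming the standard HBase 2.x+ Admin API (the class name SnapshotSketch and the default local configuration are invented for illustration, not taken from this test):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class SnapshotSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // SnapshotType.FLUSH matches the type=FLUSH shown in the SnapshotProcedure entries:
          // each region flushes its memstore before its region manifest is written.
          admin.snapshot("snaptb0-testExportFileSystemStateWithMergeRegion",
              TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"),
              SnapshotType.FLUSH);
        }
      }
    }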
2024-11-22T06:03:58,113 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=103 2024-11-22T06:03:58,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.HMaster(4169): Remote procedure done, pid=103 2024-11-22T06:03:58,113 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion on region 22cbb4718a753f944b86f8974eee4ebe 2024-11-22T06:03:58,114 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=103, ppid=102, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 22cbb4718a753f944b86f8974eee4ebe 2024-11-22T06:03:58,116 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=103, resume processing ppid=102 2024-11-22T06:03:58,116 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-22T06:03:58,116 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=103, ppid=102, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 22cbb4718a753f944b86f8974eee4ebe in 226 msec 2024-11-22T06:03:58,117 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-22T06:03:58,117 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-22T06:03:58,117 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-22T06:03:58,118 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-22T06:03:58,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742094_1270 (size=675) 2024-11-22T06:03:58,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742094_1270 (size=675) 2024-11-22T06:03:58,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742094_1270 (size=675) 2024-11-22T06:03:58,140 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ 
ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-22T06:03:58,147 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-22T06:03:58,148 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion to hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-22T06:03:58,150 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-22T06:03:58,150 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 102 2024-11-22T06:03:58,152 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=102, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } in 278 msec 2024-11-22T06:03:58,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=102 2024-11-22T06:03:58,189 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-11-22T06:03:58,214 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-22T06:03:58,215 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-22T06:03:58,216 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-22T06:03:58,216 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59190, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-22T06:03:58,216 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52916, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-22T06:03:58,217 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35350, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-22T06:03:58,218 INFO 
[RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44619 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-11-22T06:03:58,218 INFO [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41799 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-11-22T06:03:58,218 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35283 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-11-22T06:03:58,220 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithMergeRegion-1', {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T06:03:58,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] procedure2.ProcedureExecutor(1139): Stored pid=105, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-22T06:03:58,222 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=105, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_PRE_OPERATION 2024-11-22T06:03:58,222 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T06:03:58,222 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithMergeRegion-1" procId is: 105 2024-11-22T06:03:58,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=105 2024-11-22T06:03:58,223 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=105, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-22T06:03:58,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742095_1271 (size=399) 2024-11-22T06:03:58,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742095_1271 (size=399) 2024-11-22T06:03:58,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742095_1271 (size=399) 2024-11-22T06:03:58,232 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => dc79e43589f33117575a322062ec5651, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1732255438220.dc79e43589f33117575a322062ec5651.', STARTKEY => '', ENDKEY => '2'}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion-1', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 
'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788 2024-11-22T06:03:58,236 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 7cf525f0dcbe7849cf98c1a30246cf62, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,2,1732255438220.7cf525f0dcbe7849cf98c1a30246cf62.', STARTKEY => '2', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion-1', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788 2024-11-22T06:03:58,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742096_1272 (size=85) 2024-11-22T06:03:58,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742096_1272 (size=85) 2024-11-22T06:03:58,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742096_1272 (size=85) 2024-11-22T06:03:58,245 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1732255438220.dc79e43589f33117575a322062ec5651.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T06:03:58,245 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1722): Closing dc79e43589f33117575a322062ec5651, disabling compactions & flushes 2024-11-22T06:03:58,245 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1732255438220.dc79e43589f33117575a322062ec5651. 2024-11-22T06:03:58,245 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1732255438220.dc79e43589f33117575a322062ec5651. 2024-11-22T06:03:58,245 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1732255438220.dc79e43589f33117575a322062ec5651. after waiting 0 ms 2024-11-22T06:03:58,245 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1732255438220.dc79e43589f33117575a322062ec5651. 
2024-11-22T06:03:58,245 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1732255438220.dc79e43589f33117575a322062ec5651. 2024-11-22T06:03:58,245 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1676): Region close journal for dc79e43589f33117575a322062ec5651: Waiting for close lock at 1732255438245Disabling compacts and flushes for region at 1732255438245Disabling writes for close at 1732255438245Writing region close event to WAL at 1732255438245Closed at 1732255438245 2024-11-22T06:03:58,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742097_1273 (size=85) 2024-11-22T06:03:58,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742097_1273 (size=85) 2024-11-22T06:03:58,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742097_1273 (size=85) 2024-11-22T06:03:58,249 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,2,1732255438220.7cf525f0dcbe7849cf98c1a30246cf62.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T06:03:58,249 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1722): Closing 7cf525f0dcbe7849cf98c1a30246cf62, disabling compactions & flushes 2024-11-22T06:03:58,249 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,2,1732255438220.7cf525f0dcbe7849cf98c1a30246cf62. 2024-11-22T06:03:58,249 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1732255438220.7cf525f0dcbe7849cf98c1a30246cf62. 2024-11-22T06:03:58,249 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1732255438220.7cf525f0dcbe7849cf98c1a30246cf62. after waiting 0 ms 2024-11-22T06:03:58,249 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,2,1732255438220.7cf525f0dcbe7849cf98c1a30246cf62. 2024-11-22T06:03:58,249 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,2,1732255438220.7cf525f0dcbe7849cf98c1a30246cf62. 
2024-11-22T06:03:58,249 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1676): Region close journal for 7cf525f0dcbe7849cf98c1a30246cf62: Waiting for close lock at 1732255438249Disabling compacts and flushes for region at 1732255438249Disabling writes for close at 1732255438249Writing region close event to WAL at 1732255438249Closed at 1732255438249 2024-11-22T06:03:58,251 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=105, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_ADD_TO_META 2024-11-22T06:03:58,251 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1732255438220.dc79e43589f33117575a322062ec5651.","families":{"info":[{"qualifier":"regioninfo","vlen":84,"tag":[],"timestamp":"1732255438251"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732255438251"}]},"ts":"1732255438251"} 2024-11-22T06:03:58,251 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,2,1732255438220.7cf525f0dcbe7849cf98c1a30246cf62.","families":{"info":[{"qualifier":"regioninfo","vlen":84,"tag":[],"timestamp":"1732255438251"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732255438251"}]},"ts":"1732255438251"} 2024-11-22T06:03:58,254 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-22T06:03:58,255 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=105, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-22T06:03:58,256 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732255438255"}]},"ts":"1732255438255"} 2024-11-22T06:03:58,258 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=ENABLING in hbase:meta 2024-11-22T06:03:58,258 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(204): Hosts are {407411ea95db=0} racks are {/default-rack=0} 2024-11-22T06:03:58,260 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-22T06:03:58,260 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-22T06:03:58,260 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-22T06:03:58,260 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-22T06:03:58,260 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-22T06:03:58,260 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-22T06:03:58,260 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-22T06:03:58,260 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-22T06:03:58,260 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-22T06:03:58,260 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-22T06:03:58,261 INFO [PEWorker-4 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=106, ppid=105, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=dc79e43589f33117575a322062ec5651, ASSIGN}, {pid=107, ppid=105, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=7cf525f0dcbe7849cf98c1a30246cf62, ASSIGN}] 2024-11-22T06:03:58,263 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=106, ppid=105, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=dc79e43589f33117575a322062ec5651, ASSIGN 2024-11-22T06:03:58,263 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=107, ppid=105, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=7cf525f0dcbe7849cf98c1a30246cf62, ASSIGN 2024-11-22T06:03:58,265 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=107, ppid=105, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=7cf525f0dcbe7849cf98c1a30246cf62, ASSIGN; state=OFFLINE, location=407411ea95db,35283,1732255321784; forceNewPlan=false, retain=false 2024-11-22T06:03:58,265 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=106, ppid=105, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=dc79e43589f33117575a322062ec5651, ASSIGN; state=OFFLINE, location=407411ea95db,44619,1732255321573; forceNewPlan=false, retain=false 2024-11-22T06:03:58,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=105 2024-11-22T06:03:58,415 INFO [407411ea95db:40429 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
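(Editor's illustrative sketch, not part of the captured log.) The CreateTableProcedure traced above (pid=105) creates testtb-testExportFileSystemStateWithMergeRegion-1 with a single 'cf' family, one version, and a split key of '2', which is why two regions, dc79e43589f33117575a322062ec5651 with keys ['', '2') and 7cf525f0dcbe7849cf98c1a30246cf62 with keys ['2', ''), are laid out and assigned in the surrounding entries. A hedged sketch of the equivalent client call, assuming the HBase 2.x+ Admin and TableDescriptorBuilder API (the class name CreateTableSketch is invented):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName name = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1");
          // Single column family 'cf' with VERSIONS => '1', as in the descriptor logged above;
          // the remaining attributes printed in the log are HBase defaults.
          TableDescriptorBuilder table = TableDescriptorBuilder.newBuilder(name)
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                  .setMaxVersions(1)
                  .build());
          // One split key ('2') produces the two regions seen in the assignment entries.
          admin.createTable(table.build(), new byte[][] { Bytes.toBytes("2") });
        }
      }
    }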
2024-11-22T06:03:58,415 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=107 updating hbase:meta row=7cf525f0dcbe7849cf98c1a30246cf62, regionState=OPENING, regionLocation=407411ea95db,35283,1732255321784 2024-11-22T06:03:58,415 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=106 updating hbase:meta row=dc79e43589f33117575a322062ec5651, regionState=OPENING, regionLocation=407411ea95db,44619,1732255321573 2024-11-22T06:03:58,417 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=106, ppid=105, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=dc79e43589f33117575a322062ec5651, ASSIGN because future has completed 2024-11-22T06:03:58,418 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=108, ppid=106, state=RUNNABLE, hasLock=false; OpenRegionProcedure dc79e43589f33117575a322062ec5651, server=407411ea95db,44619,1732255321573}] 2024-11-22T06:03:58,418 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=107, ppid=105, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=7cf525f0dcbe7849cf98c1a30246cf62, ASSIGN because future has completed 2024-11-22T06:03:58,419 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=109, ppid=107, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7cf525f0dcbe7849cf98c1a30246cf62, server=407411ea95db,35283,1732255321784}] 2024-11-22T06:03:58,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=105 2024-11-22T06:03:58,573 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,2,1732255438220.7cf525f0dcbe7849cf98c1a30246cf62. 2024-11-22T06:03:58,573 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,,1732255438220.dc79e43589f33117575a322062ec5651. 2024-11-22T06:03:58,573 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(7752): Opening region: {ENCODED => 7cf525f0dcbe7849cf98c1a30246cf62, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,2,1732255438220.7cf525f0dcbe7849cf98c1a30246cf62.', STARTKEY => '2', ENDKEY => ''} 2024-11-22T06:03:58,573 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(7752): Opening region: {ENCODED => dc79e43589f33117575a322062ec5651, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1732255438220.dc79e43589f33117575a322062ec5651.', STARTKEY => '', ENDKEY => '2'} 2024-11-22T06:03:58,573 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,2,1732255438220.7cf525f0dcbe7849cf98c1a30246cf62. 
service=AccessControlService 2024-11-22T06:03:58,573 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,,1732255438220.dc79e43589f33117575a322062ec5651. service=AccessControlService 2024-11-22T06:03:58,573 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-22T06:03:58,573 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-22T06:03:58,573 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 dc79e43589f33117575a322062ec5651 2024-11-22T06:03:58,573 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 7cf525f0dcbe7849cf98c1a30246cf62 2024-11-22T06:03:58,573 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1732255438220.dc79e43589f33117575a322062ec5651.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T06:03:58,573 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,2,1732255438220.7cf525f0dcbe7849cf98c1a30246cf62.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T06:03:58,574 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(7794): checking encryption for dc79e43589f33117575a322062ec5651 2024-11-22T06:03:58,574 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(7797): checking classloading for dc79e43589f33117575a322062ec5651 2024-11-22T06:03:58,574 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(7794): checking encryption for 7cf525f0dcbe7849cf98c1a30246cf62 2024-11-22T06:03:58,574 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(7797): checking classloading for 7cf525f0dcbe7849cf98c1a30246cf62 2024-11-22T06:03:58,575 INFO [StoreOpener-dc79e43589f33117575a322062ec5651-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region dc79e43589f33117575a322062ec5651 2024-11-22T06:03:58,575 INFO [StoreOpener-7cf525f0dcbe7849cf98c1a30246cf62-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, 
cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 7cf525f0dcbe7849cf98c1a30246cf62 2024-11-22T06:03:58,576 INFO [StoreOpener-dc79e43589f33117575a322062ec5651-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region dc79e43589f33117575a322062ec5651 columnFamilyName cf 2024-11-22T06:03:58,576 DEBUG [StoreOpener-dc79e43589f33117575a322062ec5651-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T06:03:58,577 INFO [StoreOpener-7cf525f0dcbe7849cf98c1a30246cf62-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7cf525f0dcbe7849cf98c1a30246cf62 columnFamilyName cf 2024-11-22T06:03:58,577 DEBUG [StoreOpener-7cf525f0dcbe7849cf98c1a30246cf62-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T06:03:58,577 INFO [StoreOpener-dc79e43589f33117575a322062ec5651-1 {}] regionserver.HStore(327): Store=dc79e43589f33117575a322062ec5651/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T06:03:58,577 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(1038): replaying wal for dc79e43589f33117575a322062ec5651 2024-11-22T06:03:58,577 INFO [StoreOpener-7cf525f0dcbe7849cf98c1a30246cf62-1 {}] regionserver.HStore(327): Store=7cf525f0dcbe7849cf98c1a30246cf62/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T06:03:58,577 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(1038): replaying wal for 7cf525f0dcbe7849cf98c1a30246cf62 2024-11-22T06:03:58,578 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/dc79e43589f33117575a322062ec5651 2024-11-22T06:03:58,578 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/7cf525f0dcbe7849cf98c1a30246cf62 2024-11-22T06:03:58,578 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/dc79e43589f33117575a322062ec5651 2024-11-22T06:03:58,578 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/7cf525f0dcbe7849cf98c1a30246cf62 2024-11-22T06:03:58,578 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(1048): stopping wal replay for dc79e43589f33117575a322062ec5651 2024-11-22T06:03:58,579 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(1060): Cleaning up temporary data for dc79e43589f33117575a322062ec5651 2024-11-22T06:03:58,579 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(1048): stopping wal replay for 7cf525f0dcbe7849cf98c1a30246cf62 2024-11-22T06:03:58,579 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(1060): Cleaning up temporary data for 7cf525f0dcbe7849cf98c1a30246cf62 2024-11-22T06:03:58,580 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(1093): writing seq id for 7cf525f0dcbe7849cf98c1a30246cf62 2024-11-22T06:03:58,580 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(1093): writing seq id for dc79e43589f33117575a322062ec5651 2024-11-22T06:03:58,582 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/7cf525f0dcbe7849cf98c1a30246cf62/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T06:03:58,582 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/dc79e43589f33117575a322062ec5651/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T06:03:58,583 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(1114): Opened 7cf525f0dcbe7849cf98c1a30246cf62; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, 
ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73920578, jitterRate=0.1015024483203888}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-22T06:03:58,583 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(1114): Opened dc79e43589f33117575a322062ec5651; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74241117, jitterRate=0.10627885162830353}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-22T06:03:58,583 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 7cf525f0dcbe7849cf98c1a30246cf62 2024-11-22T06:03:58,583 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(1122): Running coprocessor post-open hooks for dc79e43589f33117575a322062ec5651 2024-11-22T06:03:58,583 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(1006): Region open journal for dc79e43589f33117575a322062ec5651: Running coprocessor pre-open hook at 1732255438574Writing region info on filesystem at 1732255438574Initializing all the Stores at 1732255438575 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732255438575Cleaning up temporary data from old regions at 1732255438579 (+4 ms)Running coprocessor post-open hooks at 1732255438583 (+4 ms)Region opened successfully at 1732255438583 2024-11-22T06:03:58,583 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(1006): Region open journal for 7cf525f0dcbe7849cf98c1a30246cf62: Running coprocessor pre-open hook at 1732255438574Writing region info on filesystem at 1732255438574Initializing all the Stores at 1732255438574Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732255438575 (+1 ms)Cleaning up temporary data from old regions at 1732255438579 (+4 ms)Running coprocessor post-open hooks at 1732255438583 (+4 ms)Region opened successfully at 1732255438583 2024-11-22T06:03:58,584 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,2,1732255438220.7cf525f0dcbe7849cf98c1a30246cf62., pid=109, masterSystemTime=1732255438570 2024-11-22T06:03:58,585 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,,1732255438220.dc79e43589f33117575a322062ec5651., pid=108, masterSystemTime=1732255438570 2024-11-22T06:03:58,587 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] 
regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,2,1732255438220.7cf525f0dcbe7849cf98c1a30246cf62. 2024-11-22T06:03:58,587 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,2,1732255438220.7cf525f0dcbe7849cf98c1a30246cf62. 2024-11-22T06:03:58,588 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=107 updating hbase:meta row=7cf525f0dcbe7849cf98c1a30246cf62, regionState=OPEN, openSeqNum=2, regionLocation=407411ea95db,35283,1732255321784 2024-11-22T06:03:58,588 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,,1732255438220.dc79e43589f33117575a322062ec5651. 2024-11-22T06:03:58,588 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,,1732255438220.dc79e43589f33117575a322062ec5651. 2024-11-22T06:03:58,589 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=106 updating hbase:meta row=dc79e43589f33117575a322062ec5651, regionState=OPEN, openSeqNum=2, regionLocation=407411ea95db,44619,1732255321573 2024-11-22T06:03:58,590 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=109, ppid=107, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7cf525f0dcbe7849cf98c1a30246cf62, server=407411ea95db,35283,1732255321784 because future has completed 2024-11-22T06:03:58,591 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=108, ppid=106, state=RUNNABLE, hasLock=false; OpenRegionProcedure dc79e43589f33117575a322062ec5651, server=407411ea95db,44619,1732255321573 because future has completed 2024-11-22T06:03:58,593 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=109, resume processing ppid=107 2024-11-22T06:03:58,593 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=109, ppid=107, state=SUCCESS, hasLock=false; OpenRegionProcedure 7cf525f0dcbe7849cf98c1a30246cf62, server=407411ea95db,35283,1732255321784 in 172 msec 2024-11-22T06:03:58,594 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=107, ppid=105, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=7cf525f0dcbe7849cf98c1a30246cf62, ASSIGN in 332 msec 2024-11-22T06:03:58,594 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=108, resume processing ppid=106 2024-11-22T06:03:58,594 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=108, ppid=106, state=SUCCESS, hasLock=false; OpenRegionProcedure dc79e43589f33117575a322062ec5651, server=407411ea95db,44619,1732255321573 in 174 msec 2024-11-22T06:03:58,596 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=106, resume processing ppid=105 2024-11-22T06:03:58,596 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=106, ppid=105, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=dc79e43589f33117575a322062ec5651, ASSIGN in 333 msec 2024-11-22T06:03:58,597 
INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=105, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-22T06:03:58,597 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732255438597"}]},"ts":"1732255438597"} 2024-11-22T06:03:58,599 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=ENABLED in hbase:meta 2024-11-22T06:03:58,600 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=105, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_POST_OPERATION 2024-11-22T06:03:58,600 DEBUG [PEWorker-4 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithMergeRegion-1 jenkins: RWXCA 2024-11-22T06:03:58,603 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41799 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion-1], kv [jenkins: RWXCA] 2024-11-22T06:03:58,643 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:03:58,643 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41799-0x101626c19a90002, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:03:58,643 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35283-0x101626c19a90003, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:03:58,643 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44619-0x101626c19a90001, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:03:58,655 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-22T06:03:58,656 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-11-22T06:03:58,656 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-22T06:03:58,656 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data 
PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-22T06:03:58,656 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-22T06:03:58,656 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-11-22T06:03:58,656 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-11-22T06:03:58,656 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-11-22T06:03:58,658 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=105, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 435 msec 2024-11-22T06:03:58,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=105 2024-11-22T06:03:58,849 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-11-22T06:03:58,852 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion-1', row='1', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion-1,,1732255438220.dc79e43589f33117575a322062ec5651., hostname=407411ea95db,44619,1732255321573, seqNum=2] 2024-11-22T06:03:58,857 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion-1', row='2', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion-1,2,1732255438220.7cf525f0dcbe7849cf98c1a30246cf62., hostname=407411ea95db,35283,1732255321784, seqNum=2] 2024-11-22T06:03:58,859 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion-1,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion-1 ,, for max=2147483647 with caching=100 2024-11-22T06:03:58,875 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.HMaster$2(2278): Client=jenkins//172.17.0.2 merge regions [dc79e43589f33117575a322062ec5651, 7cf525f0dcbe7849cf98c1a30246cf62] 2024-11-22T06:03:58,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] procedure2.ProcedureExecutor(1139): Stored pid=110, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure 
table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[dc79e43589f33117575a322062ec5651, 7cf525f0dcbe7849cf98c1a30246cf62], force=true 2024-11-22T06:03:58,883 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=110, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[dc79e43589f33117575a322062ec5651, 7cf525f0dcbe7849cf98c1a30246cf62], force=true 2024-11-22T06:03:58,883 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=110, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[dc79e43589f33117575a322062ec5651, 7cf525f0dcbe7849cf98c1a30246cf62], force=true 2024-11-22T06:03:58,883 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=110, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[dc79e43589f33117575a322062ec5651, 7cf525f0dcbe7849cf98c1a30246cf62], force=true 2024-11-22T06:03:58,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=110 2024-11-22T06:03:58,897 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=111, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=dc79e43589f33117575a322062ec5651, UNASSIGN}, {pid=112, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=7cf525f0dcbe7849cf98c1a30246cf62, UNASSIGN}] 2024-11-22T06:03:58,899 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=112, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=7cf525f0dcbe7849cf98c1a30246cf62, UNASSIGN 2024-11-22T06:03:58,899 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=111, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=dc79e43589f33117575a322062ec5651, UNASSIGN 2024-11-22T06:03:58,900 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=112 updating hbase:meta row=7cf525f0dcbe7849cf98c1a30246cf62, regionState=CLOSING, regionLocation=407411ea95db,35283,1732255321784 2024-11-22T06:03:58,901 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=111 updating hbase:meta row=dc79e43589f33117575a322062ec5651, regionState=CLOSING, regionLocation=407411ea95db,44619,1732255321573 2024-11-22T06:03:58,904 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=112, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=7cf525f0dcbe7849cf98c1a30246cf62, UNASSIGN because future has completed 2024-11-22T06:03:58,904 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-22T06:03:58,904 INFO [PEWorker-5 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=113, ppid=112, state=RUNNABLE, hasLock=false; CloseRegionProcedure 7cf525f0dcbe7849cf98c1a30246cf62, server=407411ea95db,35283,1732255321784}] 2024-11-22T06:03:58,904 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=111, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=dc79e43589f33117575a322062ec5651, UNASSIGN because future has completed 2024-11-22T06:03:58,905 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-22T06:03:58,905 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=114, ppid=111, state=RUNNABLE, hasLock=false; CloseRegionProcedure dc79e43589f33117575a322062ec5651, server=407411ea95db,44619,1732255321573}] 2024-11-22T06:03:58,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=110 2024-11-22T06:03:59,057 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] handler.UnassignRegionHandler(122): Close 7cf525f0dcbe7849cf98c1a30246cf62 2024-11-22T06:03:59,057 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-22T06:03:59,058 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1722): Closing 7cf525f0dcbe7849cf98c1a30246cf62, disabling compactions & flushes 2024-11-22T06:03:59,058 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,2,1732255438220.7cf525f0dcbe7849cf98c1a30246cf62. 2024-11-22T06:03:59,058 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1732255438220.7cf525f0dcbe7849cf98c1a30246cf62. 2024-11-22T06:03:59,058 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1732255438220.7cf525f0dcbe7849cf98c1a30246cf62. after waiting 0 ms 2024-11-22T06:03:59,058 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,2,1732255438220.7cf525f0dcbe7849cf98c1a30246cf62. 
2024-11-22T06:03:59,058 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(2902): Flushing 7cf525f0dcbe7849cf98c1a30246cf62 1/1 column families, dataSize=24 B heapSize=352 B 2024-11-22T06:03:59,059 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] handler.UnassignRegionHandler(122): Close dc79e43589f33117575a322062ec5651 2024-11-22T06:03:59,059 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-22T06:03:59,060 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(1722): Closing dc79e43589f33117575a322062ec5651, disabling compactions & flushes 2024-11-22T06:03:59,060 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1732255438220.dc79e43589f33117575a322062ec5651. 2024-11-22T06:03:59,060 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1732255438220.dc79e43589f33117575a322062ec5651. 2024-11-22T06:03:59,060 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1732255438220.dc79e43589f33117575a322062ec5651. after waiting 0 ms 2024-11-22T06:03:59,060 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1732255438220.dc79e43589f33117575a322062ec5651. 
2024-11-22T06:03:59,060 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(2902): Flushing dc79e43589f33117575a322062ec5651 1/1 column families, dataSize=24 B heapSize=352 B 2024-11-22T06:03:59,082 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/7cf525f0dcbe7849cf98c1a30246cf62/.tmp/cf/33efd3bebb814059bfcfd0319c39de3c is 28, key is 2/cf:/1732255438858/Put/seqid=0 2024-11-22T06:03:59,082 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/dc79e43589f33117575a322062ec5651/.tmp/cf/1d3635ced2fb4f2297defa16145b458e is 28, key is 1/cf:/1732255438853/Put/seqid=0 2024-11-22T06:03:59,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742098_1274 (size=4945) 2024-11-22T06:03:59,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742098_1274 (size=4945) 2024-11-22T06:03:59,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742098_1274 (size=4945) 2024-11-22T06:03:59,101 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/dc79e43589f33117575a322062ec5651/.tmp/cf/1d3635ced2fb4f2297defa16145b458e 2024-11-22T06:03:59,109 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/dc79e43589f33117575a322062ec5651/.tmp/cf/1d3635ced2fb4f2297defa16145b458e as hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/dc79e43589f33117575a322062ec5651/cf/1d3635ced2fb4f2297defa16145b458e 2024-11-22T06:03:59,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742099_1275 (size=4945) 2024-11-22T06:03:59,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742099_1275 (size=4945) 2024-11-22T06:03:59,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742099_1275 (size=4945) 2024-11-22T06:03:59,118 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24 B at sequenceid=5 (bloomFilter=true), 
to=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/7cf525f0dcbe7849cf98c1a30246cf62/.tmp/cf/33efd3bebb814059bfcfd0319c39de3c 2024-11-22T06:03:59,123 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/dc79e43589f33117575a322062ec5651/cf/1d3635ced2fb4f2297defa16145b458e, entries=1, sequenceid=5, filesize=4.8 K 2024-11-22T06:03:59,124 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(3140): Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for dc79e43589f33117575a322062ec5651 in 64ms, sequenceid=5, compaction requested=false 2024-11-22T06:03:59,125 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion-1' 2024-11-22T06:03:59,125 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/7cf525f0dcbe7849cf98c1a30246cf62/.tmp/cf/33efd3bebb814059bfcfd0319c39de3c as hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/7cf525f0dcbe7849cf98c1a30246cf62/cf/33efd3bebb814059bfcfd0319c39de3c 2024-11-22T06:03:59,132 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/dc79e43589f33117575a322062ec5651/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-22T06:03:59,133 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-22T06:03:59,133 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1732255438220.dc79e43589f33117575a322062ec5651. 
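The entries above trace the close path for the two child regions before the merge: updates are disabled, each region's ~24 B memstore is flushed to a new HFile under .tmp/cf, the file is committed into cf/, and the region is closed. Those flushes are triggered automatically by the close, not by client code; for reference only, an explicit flush of the same table through the HBase Admin API would look like the minimal sketch below (class name and configuration handling are illustrative, assuming a reachable cluster and a standard HBase 2.x+ client classpath).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Ask every region of the table to write its memstore out as HFiles,
          // the same kind of flush the close path performs in the log above.
          admin.flush(TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1"));
        }
      }
    }
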
2024-11-22T06:03:59,133 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/7cf525f0dcbe7849cf98c1a30246cf62/cf/33efd3bebb814059bfcfd0319c39de3c, entries=1, sequenceid=5, filesize=4.8 K 2024-11-22T06:03:59,133 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(1676): Region close journal for dc79e43589f33117575a322062ec5651: Waiting for close lock at 1732255439059Running coprocessor pre-close hooks at 1732255439059Disabling compacts and flushes for region at 1732255439059Disabling writes for close at 1732255439060 (+1 ms)Obtaining lock to block concurrent updates at 1732255439060Preparing flush snapshotting stores in dc79e43589f33117575a322062ec5651 at 1732255439060Finished memstore snapshotting testtb-testExportFileSystemStateWithMergeRegion-1,,1732255438220.dc79e43589f33117575a322062ec5651., syncing WAL and waiting on mvcc, flushsize=dataSize=24, getHeapSize=336, getOffHeapSize=0, getCellsCount=1 at 1732255439060Flushing stores of testtb-testExportFileSystemStateWithMergeRegion-1,,1732255438220.dc79e43589f33117575a322062ec5651. at 1732255439061 (+1 ms)Flushing dc79e43589f33117575a322062ec5651/cf: creating writer at 1732255439061Flushing dc79e43589f33117575a322062ec5651/cf: appending metadata at 1732255439082 (+21 ms)Flushing dc79e43589f33117575a322062ec5651/cf: closing flushed file at 1732255439082Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7bb57b53: reopening flushed file at 1732255439108 (+26 ms)Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for dc79e43589f33117575a322062ec5651 in 64ms, sequenceid=5, compaction requested=false at 1732255439124 (+16 ms)Writing region close event to WAL at 1732255439127 (+3 ms)Running coprocessor post-close hooks at 1732255439132 (+5 ms)Closed at 1732255439133 (+1 ms) 2024-11-22T06:03:59,135 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(3140): Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for 7cf525f0dcbe7849cf98c1a30246cf62 in 76ms, sequenceid=5, compaction requested=false 2024-11-22T06:03:59,137 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] handler.UnassignRegionHandler(157): Closed dc79e43589f33117575a322062ec5651 2024-11-22T06:03:59,137 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=111 updating hbase:meta row=dc79e43589f33117575a322062ec5651, regionState=CLOSED 2024-11-22T06:03:59,139 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=114, ppid=111, state=RUNNABLE, hasLock=false; CloseRegionProcedure dc79e43589f33117575a322062ec5651, server=407411ea95db,44619,1732255321573 because future has completed 2024-11-22T06:03:59,145 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=114, resume processing ppid=111 2024-11-22T06:03:59,145 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=114, ppid=111, state=SUCCESS, hasLock=false; CloseRegionProcedure dc79e43589f33117575a322062ec5651, server=407411ea95db,44619,1732255321573 in 235 msec 2024-11-22T06:03:59,146 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished 
pid=111, ppid=110, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=dc79e43589f33117575a322062ec5651, UNASSIGN in 248 msec 2024-11-22T06:03:59,152 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/7cf525f0dcbe7849cf98c1a30246cf62/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-22T06:03:59,153 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-22T06:03:59,153 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,2,1732255438220.7cf525f0dcbe7849cf98c1a30246cf62. 2024-11-22T06:03:59,153 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1676): Region close journal for 7cf525f0dcbe7849cf98c1a30246cf62: Waiting for close lock at 1732255439058Running coprocessor pre-close hooks at 1732255439058Disabling compacts and flushes for region at 1732255439058Disabling writes for close at 1732255439058Obtaining lock to block concurrent updates at 1732255439058Preparing flush snapshotting stores in 7cf525f0dcbe7849cf98c1a30246cf62 at 1732255439058Finished memstore snapshotting testtb-testExportFileSystemStateWithMergeRegion-1,2,1732255438220.7cf525f0dcbe7849cf98c1a30246cf62., syncing WAL and waiting on mvcc, flushsize=dataSize=24, getHeapSize=336, getOffHeapSize=0, getCellsCount=1 at 1732255439058Flushing stores of testtb-testExportFileSystemStateWithMergeRegion-1,2,1732255438220.7cf525f0dcbe7849cf98c1a30246cf62. 
at 1732255439059 (+1 ms)Flushing 7cf525f0dcbe7849cf98c1a30246cf62/cf: creating writer at 1732255439059Flushing 7cf525f0dcbe7849cf98c1a30246cf62/cf: appending metadata at 1732255439082 (+23 ms)Flushing 7cf525f0dcbe7849cf98c1a30246cf62/cf: closing flushed file at 1732255439082Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7e77f7db: reopening flushed file at 1732255439124 (+42 ms)Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for 7cf525f0dcbe7849cf98c1a30246cf62 in 76ms, sequenceid=5, compaction requested=false at 1732255439135 (+11 ms)Writing region close event to WAL at 1732255439137 (+2 ms)Running coprocessor post-close hooks at 1732255439153 (+16 ms)Closed at 1732255439153 2024-11-22T06:03:59,155 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] handler.UnassignRegionHandler(157): Closed 7cf525f0dcbe7849cf98c1a30246cf62 2024-11-22T06:03:59,156 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=112 updating hbase:meta row=7cf525f0dcbe7849cf98c1a30246cf62, regionState=CLOSED 2024-11-22T06:03:59,158 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=113, ppid=112, state=RUNNABLE, hasLock=false; CloseRegionProcedure 7cf525f0dcbe7849cf98c1a30246cf62, server=407411ea95db,35283,1732255321784 because future has completed 2024-11-22T06:03:59,161 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=113, resume processing ppid=112 2024-11-22T06:03:59,161 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=113, ppid=112, state=SUCCESS, hasLock=false; CloseRegionProcedure 7cf525f0dcbe7849cf98c1a30246cf62, server=407411ea95db,35283,1732255321784 in 254 msec 2024-11-22T06:03:59,163 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=112, resume processing ppid=110 2024-11-22T06:03:59,164 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=112, ppid=110, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=7cf525f0dcbe7849cf98c1a30246cf62, UNASSIGN in 264 msec 2024-11-22T06:03:59,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742100_1276 (size=84) 2024-11-22T06:03:59,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742100_1276 (size=84) 2024-11-22T06:03:59,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742100_1276 (size=84) 2024-11-22T06:03:59,189 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T06:03:59,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=110 2024-11-22T06:03:59,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742101_1277 (size=20) 2024-11-22T06:03:59,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742101_1277 (size=20) 2024-11-22T06:03:59,209 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742101_1277 (size=20) 2024-11-22T06:03:59,211 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T06:03:59,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742102_1278 (size=21) 2024-11-22T06:03:59,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742102_1278 (size=21) 2024-11-22T06:03:59,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742102_1278 (size=21) 2024-11-22T06:03:59,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742103_1279 (size=84) 2024-11-22T06:03:59,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742103_1279 (size=84) 2024-11-22T06:03:59,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742103_1279 (size=84) 2024-11-22T06:03:59,245 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T06:03:59,267 DEBUG [PEWorker-5 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ef394ae7b5140734d6b528266ee80f5b/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=-1 2024-11-22T06:03:59,270 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1732255438220.dc79e43589f33117575a322062ec5651.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-11-22T06:03:59,271 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,2,1732255438220.7cf525f0dcbe7849cf98c1a30246cf62.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-11-22T06:03:59,271 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Put {"totalColumns":7,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1732255438221.ef394ae7b5140734d6b528266ee80f5b.","families":{"info":[{"qualifier":"regioninfo","vlen":83,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"merge0000","vlen":84,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"merge0001","vlen":84,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-11-22T06:03:59,294 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=115, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=ef394ae7b5140734d6b528266ee80f5b, ASSIGN}] 2024-11-22T06:03:59,295 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took 
xlock for pid=115, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=ef394ae7b5140734d6b528266ee80f5b, ASSIGN 2024-11-22T06:03:59,296 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=115, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=ef394ae7b5140734d6b528266ee80f5b, ASSIGN; state=MERGED, location=407411ea95db,44619,1732255321573; forceNewPlan=false, retain=false 2024-11-22T06:03:59,447 INFO [407411ea95db:40429 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-22T06:03:59,449 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=115 updating hbase:meta row=ef394ae7b5140734d6b528266ee80f5b, regionState=OPENING, regionLocation=407411ea95db,44619,1732255321573 2024-11-22T06:03:59,453 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=115, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=ef394ae7b5140734d6b528266ee80f5b, ASSIGN because future has completed 2024-11-22T06:03:59,454 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=116, ppid=115, state=RUNNABLE, hasLock=false; OpenRegionProcedure ef394ae7b5140734d6b528266ee80f5b, server=407411ea95db,44619,1732255321573}] 2024-11-22T06:03:59,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=110 2024-11-22T06:03:59,609 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,,1732255438221.ef394ae7b5140734d6b528266ee80f5b. 2024-11-22T06:03:59,610 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(7752): Opening region: {ENCODED => ef394ae7b5140734d6b528266ee80f5b, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1732255438221.ef394ae7b5140734d6b528266ee80f5b.', STARTKEY => '', ENDKEY => ''} 2024-11-22T06:03:59,610 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,,1732255438221.ef394ae7b5140734d6b528266ee80f5b. service=AccessControlService 2024-11-22T06:03:59,610 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-22T06:03:59,610 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 ef394ae7b5140734d6b528266ee80f5b 2024-11-22T06:03:59,610 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1732255438221.ef394ae7b5140734d6b528266ee80f5b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T06:03:59,610 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(7794): checking encryption for ef394ae7b5140734d6b528266ee80f5b 2024-11-22T06:03:59,610 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(7797): checking classloading for ef394ae7b5140734d6b528266ee80f5b 2024-11-22T06:03:59,612 INFO [StoreOpener-ef394ae7b5140734d6b528266ee80f5b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region ef394ae7b5140734d6b528266ee80f5b 2024-11-22T06:03:59,613 INFO [StoreOpener-ef394ae7b5140734d6b528266ee80f5b-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ef394ae7b5140734d6b528266ee80f5b columnFamilyName cf 2024-11-22T06:03:59,613 DEBUG [StoreOpener-ef394ae7b5140734d6b528266ee80f5b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T06:03:59,631 DEBUG [StoreOpener-ef394ae7b5140734d6b528266ee80f5b-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ef394ae7b5140734d6b528266ee80f5b/cf/1d3635ced2fb4f2297defa16145b458e.dc79e43589f33117575a322062ec5651->hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/dc79e43589f33117575a322062ec5651/cf/1d3635ced2fb4f2297defa16145b458e-top 2024-11-22T06:03:59,638 DEBUG [StoreOpener-ef394ae7b5140734d6b528266ee80f5b-1 {}] regionserver.StoreEngine(278): loaded 
hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ef394ae7b5140734d6b528266ee80f5b/cf/33efd3bebb814059bfcfd0319c39de3c.7cf525f0dcbe7849cf98c1a30246cf62->hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/7cf525f0dcbe7849cf98c1a30246cf62/cf/33efd3bebb814059bfcfd0319c39de3c-top 2024-11-22T06:03:59,638 INFO [StoreOpener-ef394ae7b5140734d6b528266ee80f5b-1 {}] regionserver.HStore(327): Store=ef394ae7b5140734d6b528266ee80f5b/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T06:03:59,638 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(1038): replaying wal for ef394ae7b5140734d6b528266ee80f5b 2024-11-22T06:03:59,640 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ef394ae7b5140734d6b528266ee80f5b 2024-11-22T06:03:59,641 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ef394ae7b5140734d6b528266ee80f5b 2024-11-22T06:03:59,643 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(1048): stopping wal replay for ef394ae7b5140734d6b528266ee80f5b 2024-11-22T06:03:59,643 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(1060): Cleaning up temporary data for ef394ae7b5140734d6b528266ee80f5b 2024-11-22T06:03:59,646 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(1093): writing seq id for ef394ae7b5140734d6b528266ee80f5b 2024-11-22T06:03:59,651 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(1114): Opened ef394ae7b5140734d6b528266ee80f5b; next sequenceid=9; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68801639, jitterRate=0.025224313139915466}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-22T06:03:59,651 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ef394ae7b5140734d6b528266ee80f5b 2024-11-22T06:03:59,652 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(1006): Region open journal for ef394ae7b5140734d6b528266ee80f5b: Running coprocessor pre-open hook at 1732255439610Writing region info on filesystem at 1732255439611 (+1 ms)Initializing all the Stores at 1732255439611Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 
'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732255439611Cleaning up temporary data from old regions at 1732255439643 (+32 ms)Running coprocessor post-open hooks at 1732255439651 (+8 ms)Region opened successfully at 1732255439652 (+1 ms) 2024-11-22T06:03:59,653 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,,1732255438221.ef394ae7b5140734d6b528266ee80f5b., pid=116, masterSystemTime=1732255439606 2024-11-22T06:03:59,653 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.CompactSplit(342): Ignoring compaction request for testtb-testExportFileSystemStateWithMergeRegion-1,,1732255438221.ef394ae7b5140734d6b528266ee80f5b.,because compaction is disabled. 2024-11-22T06:03:59,657 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,,1732255438221.ef394ae7b5140734d6b528266ee80f5b. 2024-11-22T06:03:59,657 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,,1732255438221.ef394ae7b5140734d6b528266ee80f5b. 2024-11-22T06:03:59,658 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=115 updating hbase:meta row=ef394ae7b5140734d6b528266ee80f5b, regionState=OPEN, openSeqNum=9, regionLocation=407411ea95db,44619,1732255321573 2024-11-22T06:03:59,661 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=116, ppid=115, state=RUNNABLE, hasLock=false; OpenRegionProcedure ef394ae7b5140734d6b528266ee80f5b, server=407411ea95db,44619,1732255321573 because future has completed 2024-11-22T06:03:59,671 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=116, resume processing ppid=115 2024-11-22T06:03:59,671 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=116, ppid=115, state=SUCCESS, hasLock=false; OpenRegionProcedure ef394ae7b5140734d6b528266ee80f5b, server=407411ea95db,44619,1732255321573 in 209 msec 2024-11-22T06:03:59,674 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=115, resume processing ppid=110 2024-11-22T06:03:59,674 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=115, ppid=110, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=ef394ae7b5140734d6b528266ee80f5b, ASSIGN in 377 msec 2024-11-22T06:03:59,676 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=110, state=SUCCESS, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[dc79e43589f33117575a322062ec5651, 7cf525f0dcbe7849cf98c1a30246cf62], force=true in 796 msec 2024-11-22T06:03:59,721 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
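At this point pid=110 (MergeTableRegionsProcedure, force=true) has finished in 796 msec: both child regions were unassigned and closed, the merged region ef394ae7b5140734d6b528266ee80f5b was created with reference files (the ...-top entries point back at the parents' HFiles), assigned to 407411ea95db,44619 and opened, and the compaction request was ignored because compaction is disabled for the test. The log only records the server-side procedure; on the client side a forced merge is normally issued through the Admin API. A minimal sketch, assuming an HBase 2.x+ client and that merging the table's first two regions is what is wanted (the region lookup below is illustrative, not taken from the test code):

    import java.util.List;
    import java.util.concurrent.Future;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionInfo;

    public class MergeRegionsExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Look up the table's current regions and merge the first two,
          // forcing the merge even if they are not adjacent (force=true in the log).
          List<RegionInfo> regions = admin.getRegions(table);
          Future<Void> merge = admin.mergeRegionsAsync(
              new byte[][] {
                  regions.get(0).getEncodedNameAsBytes(),
                  regions.get(1).getEncodedNameAsBytes() },
              true);
          merge.get();  // block until the MergeTableRegionsProcedure finishes
          // After the merge the table should report a single region.
          System.out.println("regions after merge: " + admin.getRegions(table).size());
        }
      }
    }

Waiting on the returned Future roughly corresponds to the repeated "Checking to see if procedure is done pid=110" polling visible throughout this part of the log.
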
2024-11-22T06:04:00,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=110 2024-11-22T06:04:00,019 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: MERGE_REGIONS, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-11-22T06:04:00,020 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } 2024-11-22T06:04:00,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732255440020 (current time:1732255440020). 2024-11-22T06:04:00,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-22T06:04:00,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 VERSION not specified, setting to 2 2024-11-22T06:04:00,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-22T06:04:00,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1c165577, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:04:00,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.ClusterIdFetcher(90): Going to request 407411ea95db,40429,-1 for getting cluster id 2024-11-22T06:04:00,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T06:04:00,021 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '6eeb2921-6f20-4de7-bcc0-81ebecd4007b' 2024-11-22T06:04:00,021 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T06:04:00,021 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "6eeb2921-6f20-4de7-bcc0-81ebecd4007b" 2024-11-22T06:04:00,022 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@48728687, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:04:00,022 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [407411ea95db,40429,-1] 2024-11-22T06:04:00,022 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T06:04:00,022 
DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:04:00,023 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59784, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T06:04:00,023 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5aef7524, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:04:00,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T06:04:00,024 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=407411ea95db,41799,1732255321718, seqNum=-1] 2024-11-22T06:04:00,024 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T06:04:00,025 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52926, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T06:04:00,026 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429. 2024-11-22T06:04:00,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-22T06:04:00,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:04:00,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:04:00,027 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-22T06:04:00,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7c081dd1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:04:00,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.ClusterIdFetcher(90): Going to request 407411ea95db,40429,-1 for getting cluster id 2024-11-22T06:04:00,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T06:04:00,029 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '6eeb2921-6f20-4de7-bcc0-81ebecd4007b' 2024-11-22T06:04:00,029 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T06:04:00,029 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "6eeb2921-6f20-4de7-bcc0-81ebecd4007b" 2024-11-22T06:04:00,029 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6187c053, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:04:00,029 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [407411ea95db,40429,-1] 2024-11-22T06:04:00,029 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T06:04:00,030 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:04:00,030 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59794, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T06:04:00,031 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5db1ba50, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:04:00,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T06:04:00,033 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=407411ea95db,41799,1732255321718, seqNum=-1] 2024-11-22T06:04:00,033 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T06:04:00,035 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52934, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-22T06:04:00,037 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion-1', locateType=CURRENT is [region=hbase:acl,,1732255324607.c2a1af531af5dfa747044be7409c1d27., hostname=407411ea95db,41799,1732255321718, seqNum=2] 2024-11-22T06:04:00,039 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429. 2024-11-22T06:04:00,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor220.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-22T06:04:00,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:04:00,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:04:00,039 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
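The entries above show the master validating the snapshot request { ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }: it opens short-lived connections to locate hbase:acl and read the table's permissions so they can be written into the snapshot description (writeAclToSnapshotDescription in the call stacks). From the client's side this whole flow starts with a single Admin call; a minimal sketch, assuming a standard client configuration (class name is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class SnapshotExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Take a flush-type snapshot of the (now single-region) table; the master
          // stores and drives the SnapshotProcedure (pid=117) seen in the log below.
          admin.snapshot("snaptb0-testExportFileSystemStateWithMergeRegion-1",
              TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1"));
        }
      }
    }
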
2024-11-22T06:04:00,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion-1], kv [jenkins: RWXCA] 2024-11-22T06:04:00,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-22T06:04:00,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] procedure2.ProcedureExecutor(1139): Stored pid=117, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } 2024-11-22T06:04:00,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }, snapshot procedure id = 117 2024-11-22T06:04:00,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=117 2024-11-22T06:04:00,043 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-22T06:04:00,044 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-22T06:04:00,047 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-22T06:04:00,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742104_1280 (size=216) 2024-11-22T06:04:00,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742104_1280 (size=216) 2024-11-22T06:04:00,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742104_1280 (size=216) 2024-11-22T06:04:00,066 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-22T06:04:00,066 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=118, ppid=117, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 
ef394ae7b5140734d6b528266ee80f5b}] 2024-11-22T06:04:00,067 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=118, ppid=117, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ef394ae7b5140734d6b528266ee80f5b 2024-11-22T06:04:00,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=117 2024-11-22T06:04:00,220 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44619 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=118 2024-11-22T06:04:00,220 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion-1,,1732255438221.ef394ae7b5140734d6b528266ee80f5b. 2024-11-22T06:04:00,220 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] regionserver.HRegion(2603): Flush status journal for ef394ae7b5140734d6b528266ee80f5b: 2024-11-22T06:04:00,220 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion-1,,1732255438221.ef394ae7b5140734d6b528266ee80f5b. for snaptb0-testExportFileSystemStateWithMergeRegion-1 completed. 2024-11-22T06:04:00,220 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion-1,,1732255438221.ef394ae7b5140734d6b528266ee80f5b.' region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-22T06:04:00,220 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-22T06:04:00,220 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ef394ae7b5140734d6b528266ee80f5b/cf/1d3635ced2fb4f2297defa16145b458e.dc79e43589f33117575a322062ec5651->hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/dc79e43589f33117575a322062ec5651/cf/1d3635ced2fb4f2297defa16145b458e-top, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ef394ae7b5140734d6b528266ee80f5b/cf/33efd3bebb814059bfcfd0319c39de3c.7cf525f0dcbe7849cf98c1a30246cf62->hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/7cf525f0dcbe7849cf98c1a30246cf62/cf/33efd3bebb814059bfcfd0319c39de3c-top] hfiles 2024-11-22T06:04:00,220 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] snapshot.SnapshotManifest(265): Adding reference for file (1/2): 
hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ef394ae7b5140734d6b528266ee80f5b/cf/1d3635ced2fb4f2297defa16145b458e.dc79e43589f33117575a322062ec5651 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-22T06:04:00,221 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] snapshot.SnapshotManifest(265): Adding reference for file (2/2): hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ef394ae7b5140734d6b528266ee80f5b/cf/33efd3bebb814059bfcfd0319c39de3c.7cf525f0dcbe7849cf98c1a30246cf62 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-22T06:04:00,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742105_1281 (size=269) 2024-11-22T06:04:00,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742105_1281 (size=269) 2024-11-22T06:04:00,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742105_1281 (size=269) 2024-11-22T06:04:00,228 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion-1,,1732255438221.ef394ae7b5140734d6b528266ee80f5b. 2024-11-22T06:04:00,228 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=118 2024-11-22T06:04:00,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.HMaster(4169): Remote procedure done, pid=118 2024-11-22T06:04:00,228 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 on region ef394ae7b5140734d6b528266ee80f5b 2024-11-22T06:04:00,228 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=118, ppid=117, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ef394ae7b5140734d6b528266ee80f5b 2024-11-22T06:04:00,231 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=118, resume processing ppid=117 2024-11-22T06:04:00,231 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=118, ppid=117, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ef394ae7b5140734d6b528266ee80f5b in 163 msec 2024-11-22T06:04:00,231 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-22T06:04:00,232 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 
} execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-22T06:04:00,232 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-22T06:04:00,233 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-22T06:04:00,233 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-22T06:04:00,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742106_1282 (size=670) 2024-11-22T06:04:00,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742106_1282 (size=670) 2024-11-22T06:04:00,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742106_1282 (size=670) 2024-11-22T06:04:00,250 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-22T06:04:00,257 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-22T06:04:00,258 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 to hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-22T06:04:00,259 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-22T06:04:00,259 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }, snapshot procedure id = 117 2024-11-22T06:04:00,261 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=117, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 
table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } in 219 msec 2024-11-22T06:04:00,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=117 2024-11-22T06:04:00,359 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-11-22T06:04:00,360 INFO [Time-limited test {}] snapshot.TestExportSnapshot(467): HDFS export destination path: hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/export-test/export-1732255440359 2024-11-22T06:04:00,360 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=hdfs://localhost:33847, tgtDir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/export-test/export-1732255440359, rawTgtDir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/export-test/export-1732255440359, srcFsUri=hdfs://localhost:33847, srcDir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788 2024-11-22T06:04:00,399 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:33847, inputRoot=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788 2024-11-22T06:04:00,399 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-2145612548_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/export-test/export-1732255440359, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/export-test/export-1732255440359/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-22T06:04:00,401 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 
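The ExportSnapshot(1085/1086/1095) lines above show the resolved input root, output root and skipTmp flag for this export. As a rough illustration only, the same export can be driven from client code along the lines of the sketch below; the ToolRunner wiring and the -snapshot/-copy-to flags follow the tool's documented usage, and the snapshot name and destination path are simply the ones visible in this log, so treat the exact invocation as an assumption rather than part of the test itself.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotDriver {
  public static void main(String[] args) throws Exception {
    // Cluster configuration; in the run logged above this would be the
    // mini-cluster config whose fs.defaultFS is hdfs://localhost:33847.
    Configuration conf = HBaseConfiguration.create();
    // Launch the ExportSnapshot MapReduce tool with the snapshot name and
    // export destination recorded in the log (placeholders for any other cluster).
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportFileSystemStateWithMergeRegion-1",
        "-copy-to",
        "hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/export-test/export-1732255440359"
    });
    System.exit(rc);
  }
}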
2024-11-22T06:04:00,409 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 to hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/export-test/export-1732255440359/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-22T06:04:00,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742108_1284 (size=670) 2024-11-22T06:04:00,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742108_1284 (size=670) 2024-11-22T06:04:00,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742108_1284 (size=670) 2024-11-22T06:04:00,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742107_1283 (size=216) 2024-11-22T06:04:00,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742107_1283 (size=216) 2024-11-22T06:04:00,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742107_1283 (size=216) 2024-11-22T06:04:00,450 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:04:00,450 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:04:00,450 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:04:00,555 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732255328437_0005_000001 (auth:SIMPLE) from 127.0.0.1:33098 2024-11-22T06:04:00,570 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1754703613/yarn-5946224054/MiniMRCluster_1754703613-localDir-nm-1_0/usercache/jenkins/appcache/application_1732255328437_0005/container_1732255328437_0005_01_000001/launch_container.sh] 2024-11-22T06:04:00,570 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1754703613/yarn-5946224054/MiniMRCluster_1754703613-localDir-nm-1_0/usercache/jenkins/appcache/application_1732255328437_0005/container_1732255328437_0005_01_000001/container_tokens] 2024-11-22T06:04:00,570 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1754703613/yarn-5946224054/MiniMRCluster_1754703613-localDir-nm-1_0/usercache/jenkins/appcache/application_1732255328437_0005/container_1732255328437_0005_01_000001/sysfs] 2024-11-22T06:04:01,486 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/hadoop-15598818678050008720.jar 2024-11-22T06:04:01,487 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:04:01,487 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:04:01,544 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-22T06:04:01,551 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/hadoop-14535165371739451717.jar 2024-11-22T06:04:01,551 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:04:01,552 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:04:01,552 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:04:01,552 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:04:01,553 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:04:01,553 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:04:01,553 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-22T06:04:01,553 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-22T06:04:01,554 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-22T06:04:01,554 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-22T06:04:01,554 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-22T06:04:01,555 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-22T06:04:01,555 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-22T06:04:01,555 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-22T06:04:01,555 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-22T06:04:01,556 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-22T06:04:01,556 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-22T06:04:01,556 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 
2024-11-22T06:04:01,556 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-22T06:04:01,557 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-22T06:04:01,557 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-22T06:04:01,557 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-22T06:04:01,557 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-22T06:04:01,558 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-22T06:04:01,591 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion 2024-11-22T06:04:01,591 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion Metrics about Tables on a single HBase RegionServer 2024-11-22T06:04:01,592 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-22T06:04:01,592 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 Metrics about Tables on a single HBase RegionServer 2024-11-22T06:04:01,592 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports 2024-11-22T06:04:01,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742109_1285 (size=131440) 2024-11-22T06:04:01,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742109_1285 (size=131440) 2024-11-22T06:04:01,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 
is added to blk_1073742109_1285 (size=131440) 2024-11-22T06:04:01,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742110_1286 (size=4188619) 2024-11-22T06:04:01,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742110_1286 (size=4188619) 2024-11-22T06:04:01,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742110_1286 (size=4188619) 2024-11-22T06:04:01,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742111_1287 (size=1323991) 2024-11-22T06:04:01,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742111_1287 (size=1323991) 2024-11-22T06:04:01,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742111_1287 (size=1323991) 2024-11-22T06:04:01,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742112_1288 (size=903736) 2024-11-22T06:04:01,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742112_1288 (size=903736) 2024-11-22T06:04:01,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742112_1288 (size=903736) 2024-11-22T06:04:01,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742113_1289 (size=8360083) 2024-11-22T06:04:01,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742113_1289 (size=8360083) 2024-11-22T06:04:01,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742113_1289 (size=8360083) 2024-11-22T06:04:01,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742114_1290 (size=1877034) 2024-11-22T06:04:01,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742114_1290 (size=1877034) 2024-11-22T06:04:01,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742114_1290 (size=1877034) 2024-11-22T06:04:01,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742115_1291 (size=77835) 2024-11-22T06:04:01,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742115_1291 (size=77835) 2024-11-22T06:04:01,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742115_1291 (size=77835) 2024-11-22T06:04:01,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742116_1292 (size=30949) 2024-11-22T06:04:01,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:43345 is added to blk_1073742116_1292 (size=30949) 2024-11-22T06:04:01,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742116_1292 (size=30949) 2024-11-22T06:04:01,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742117_1293 (size=440656) 2024-11-22T06:04:01,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742117_1293 (size=440656) 2024-11-22T06:04:01,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742117_1293 (size=440656) 2024-11-22T06:04:01,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742118_1294 (size=1597347) 2024-11-22T06:04:01,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742118_1294 (size=1597347) 2024-11-22T06:04:01,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742118_1294 (size=1597347) 2024-11-22T06:04:01,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742119_1295 (size=4695811) 2024-11-22T06:04:01,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742119_1295 (size=4695811) 2024-11-22T06:04:01,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742119_1295 (size=4695811) 2024-11-22T06:04:01,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742120_1296 (size=232957) 2024-11-22T06:04:01,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742120_1296 (size=232957) 2024-11-22T06:04:01,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742120_1296 (size=232957) 2024-11-22T06:04:01,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742121_1297 (size=127628) 2024-11-22T06:04:01,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742121_1297 (size=127628) 2024-11-22T06:04:01,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742121_1297 (size=127628) 2024-11-22T06:04:01,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742122_1298 (size=20406) 2024-11-22T06:04:01,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742122_1298 (size=20406) 2024-11-22T06:04:01,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742122_1298 (size=20406) 2024-11-22T06:04:01,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:43345 is added to blk_1073742123_1299 (size=5175431) 2024-11-22T06:04:01,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742123_1299 (size=5175431) 2024-11-22T06:04:01,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742123_1299 (size=5175431) 2024-11-22T06:04:01,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742124_1300 (size=217634) 2024-11-22T06:04:01,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742124_1300 (size=217634) 2024-11-22T06:04:01,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742124_1300 (size=217634) 2024-11-22T06:04:02,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742125_1301 (size=6424744) 2024-11-22T06:04:02,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742125_1301 (size=6424744) 2024-11-22T06:04:02,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742125_1301 (size=6424744) 2024-11-22T06:04:02,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742126_1302 (size=1832290) 2024-11-22T06:04:02,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742126_1302 (size=1832290) 2024-11-22T06:04:02,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742126_1302 (size=1832290) 2024-11-22T06:04:02,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742127_1303 (size=322274) 2024-11-22T06:04:02,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742127_1303 (size=322274) 2024-11-22T06:04:02,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742127_1303 (size=322274) 2024-11-22T06:04:02,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742128_1304 (size=503880) 2024-11-22T06:04:02,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742128_1304 (size=503880) 2024-11-22T06:04:02,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742128_1304 (size=503880) 2024-11-22T06:04:02,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742129_1305 (size=29229) 2024-11-22T06:04:02,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742129_1305 (size=29229) 2024-11-22T06:04:02,116 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742129_1305 (size=29229) 2024-11-22T06:04:02,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742130_1306 (size=24096) 2024-11-22T06:04:02,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742130_1306 (size=24096) 2024-11-22T06:04:02,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742130_1306 (size=24096) 2024-11-22T06:04:02,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742131_1307 (size=111872) 2024-11-22T06:04:02,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742131_1307 (size=111872) 2024-11-22T06:04:02,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742131_1307 (size=111872) 2024-11-22T06:04:02,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742132_1308 (size=45609) 2024-11-22T06:04:02,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742132_1308 (size=45609) 2024-11-22T06:04:02,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742132_1308 (size=45609) 2024-11-22T06:04:02,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742133_1309 (size=136454) 2024-11-22T06:04:02,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742133_1309 (size=136454) 2024-11-22T06:04:02,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742133_1309 (size=136454) 2024-11-22T06:04:02,179 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
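The TableMapReduceUtil(972) lines above record how each class the export job needs is mapped to a jar before submission, and the JobResourceUploader warning notes that no job jar is set. A minimal sketch of that dependency-shipping step, assuming only the public TableMapReduceUtil.addDependencyJars(Job) entry point; the job name is made up for illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class DependencyJarsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // "export-snapshot-deps" is an illustrative job name, not taken from the log.
    Job job = Job.getInstance(conf, "export-snapshot-deps");
    // Resolves the jar carrying each required HBase/Hadoop/thirdparty class and
    // registers it on the job's tmpjars (distributed cache), which is what
    // produces the "For class ..., using jar ..." DEBUG lines seen above.
    TableMapReduceUtil.addDependencyJars(job);
  }
}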
2024-11-22T06:04:02,181 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'snaptb0-testExportFileSystemStateWithMergeRegion-1' hfile list 2024-11-22T06:04:02,184 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(754): export split=0 size=9.7 K 2024-11-22T06:04:02,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742134_1310 (size=378) 2024-11-22T06:04:02,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742134_1310 (size=378) 2024-11-22T06:04:02,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742134_1310 (size=378) 2024-11-22T06:04:02,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742135_1311 (size=15) 2024-11-22T06:04:02,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742135_1311 (size=15) 2024-11-22T06:04:02,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742135_1311 (size=15) 2024-11-22T06:04:02,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742136_1312 (size=303793) 2024-11-22T06:04:02,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742136_1312 (size=303793) 2024-11-22T06:04:02,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742136_1312 (size=303793) 2024-11-22T06:04:02,237 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-22T06:04:02,237 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-22T06:04:02,372 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732255328437_0006_000001 (auth:SIMPLE) from 127.0.0.1:32982 2024-11-22T06:04:03,467 WARN [regionserver/407411ea95db:0.Chore.1 {}] hbase.ExecutorStatusChore(69): RS_COMPACTED_FILES_DISCHARGER's size info, queued: 1, running: 1 2024-11-22T06:04:07,094 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-22T06:04:08,073 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732255328437_0006_000001 (auth:SIMPLE) from 127.0.0.1:38398 2024-11-22T06:04:08,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742137_1313 (size=349443) 2024-11-22T06:04:08,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742137_1313 (size=349443) 2024-11-22T06:04:08,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742137_1313 (size=349443) 2024-11-22T06:04:09,764 DEBUG [master/407411ea95db:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 9c5d3a15b43735703c39879cab4ee61a changed from -1.0 to 0.0, refreshing cache 2024-11-22T06:04:09,764 DEBUG [master/407411ea95db:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 22cbb4718a753f944b86f8974eee4ebe changed from -1.0 to 0.0, refreshing cache 2024-11-22T06:04:10,293 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732255328437_0006_000001 (auth:SIMPLE) from 127.0.0.1:32986 2024-11-22T06:04:13,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742138_1314 (size=4945) 2024-11-22T06:04:13,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742138_1314 (size=4945) 2024-11-22T06:04:13,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742138_1314 (size=4945) 2024-11-22T06:04:13,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742139_1315 (size=4945) 2024-11-22T06:04:13,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742139_1315 (size=4945) 2024-11-22T06:04:13,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742139_1315 (size=4945) 2024-11-22T06:04:13,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742140_1316 (size=17474) 2024-11-22T06:04:13,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742140_1316 (size=17474) 2024-11-22T06:04:13,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742140_1316 (size=17474) 2024-11-22T06:04:13,353 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742141_1317 (size=482) 2024-11-22T06:04:13,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742141_1317 (size=482) 2024-11-22T06:04:13,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742141_1317 (size=482) 2024-11-22T06:04:13,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742142_1318 (size=17474) 2024-11-22T06:04:13,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742142_1318 (size=17474) 2024-11-22T06:04:13,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742142_1318 (size=17474) 2024-11-22T06:04:13,410 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1754703613/yarn-5946224054/MiniMRCluster_1754703613-localDir-nm-0_0/usercache/jenkins/appcache/application_1732255328437_0006/container_1732255328437_0006_01_000002/launch_container.sh] 2024-11-22T06:04:13,410 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1754703613/yarn-5946224054/MiniMRCluster_1754703613-localDir-nm-0_0/usercache/jenkins/appcache/application_1732255328437_0006/container_1732255328437_0006_01_000002/container_tokens] 2024-11-22T06:04:13,410 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1754703613/yarn-5946224054/MiniMRCluster_1754703613-localDir-nm-0_0/usercache/jenkins/appcache/application_1732255328437_0006/container_1732255328437_0006_01_000002/sysfs] 2024-11-22T06:04:13,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742143_1319 (size=349443) 2024-11-22T06:04:13,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742143_1319 (size=349443) 2024-11-22T06:04:13,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742143_1319 (size=349443) 2024-11-22T06:04:13,835 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732255328437_0006_000001 (auth:SIMPLE) from 127.0.0.1:43696 2024-11-22T06:04:15,594 INFO [Time-limited test {}] snapshot.ExportSnapshot(1210): Finalize the Snapshot Export 2024-11-22T06:04:15,595 INFO [Time-limited test {}] snapshot.ExportSnapshot(1221): Verify the exported snapshot's expiration status and integrity. 
2024-11-22T06:04:15,601 INFO [Time-limited test {}] snapshot.ExportSnapshot(1227): Export Completed: snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-22T06:04:15,602 INFO [Time-limited test {}] snapshot.TestExportSnapshot(361): Exported snapshot 2024-11-22T06:04:15,602 INFO [Time-limited test {}] snapshot.TestExportSnapshot(372): Verified filesystem state 2024-11-22T06:04:15,602 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-2145612548_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 at hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-22T06:04:15,603 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/.snapshotinfo 2024-11-22T06:04:15,603 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/data.manifest 2024-11-22T06:04:15,603 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-2145612548_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/export-test/export-1732255440359/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 at hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/export-test/export-1732255440359/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-22T06:04:15,603 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/export-test/export-1732255440359/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/.snapshotinfo 2024-11-22T06:04:15,603 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/export-test/export-1732255440359/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/data.manifest 2024-11-22T06:04:15,609 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-22T06:04:15,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] procedure2.ProcedureExecutor(1139): Stored pid=119, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-22T06:04:15,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-11-22T06:04:15,613 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732255455613"}]},"ts":"1732255455613"} 2024-11-22T06:04:15,615 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated 
tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=DISABLING in hbase:meta 2024-11-22T06:04:15,615 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithMergeRegion-1 to state=DISABLING 2024-11-22T06:04:15,616 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=120, ppid=119, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1}] 2024-11-22T06:04:15,617 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=121, ppid=120, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=ef394ae7b5140734d6b528266ee80f5b, UNASSIGN}] 2024-11-22T06:04:15,618 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=121, ppid=120, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=ef394ae7b5140734d6b528266ee80f5b, UNASSIGN 2024-11-22T06:04:15,619 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=121 updating hbase:meta row=ef394ae7b5140734d6b528266ee80f5b, regionState=CLOSING, regionLocation=407411ea95db,44619,1732255321573 2024-11-22T06:04:15,621 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=121, ppid=120, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=ef394ae7b5140734d6b528266ee80f5b, UNASSIGN because future has completed 2024-11-22T06:04:15,621 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-22T06:04:15,621 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=122, ppid=121, state=RUNNABLE, hasLock=false; CloseRegionProcedure ef394ae7b5140734d6b528266ee80f5b, server=407411ea95db,44619,1732255321573}] 2024-11-22T06:04:15,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-11-22T06:04:15,773 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] handler.UnassignRegionHandler(122): Close ef394ae7b5140734d6b528266ee80f5b 2024-11-22T06:04:15,773 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-22T06:04:15,773 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] regionserver.HRegion(1722): Closing ef394ae7b5140734d6b528266ee80f5b, disabling compactions & flushes 2024-11-22T06:04:15,773 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1732255438221.ef394ae7b5140734d6b528266ee80f5b. 2024-11-22T06:04:15,774 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1732255438221.ef394ae7b5140734d6b528266ee80f5b. 
2024-11-22T06:04:15,774 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1732255438221.ef394ae7b5140734d6b528266ee80f5b. after waiting 0 ms 2024-11-22T06:04:15,774 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1732255438221.ef394ae7b5140734d6b528266ee80f5b. 2024-11-22T06:04:15,778 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ef394ae7b5140734d6b528266ee80f5b/recovered.edits/12.seqid, newMaxSeqId=12, maxSeqId=8 2024-11-22T06:04:15,779 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-22T06:04:15,779 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1732255438221.ef394ae7b5140734d6b528266ee80f5b. 2024-11-22T06:04:15,779 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] regionserver.HRegion(1676): Region close journal for ef394ae7b5140734d6b528266ee80f5b: Waiting for close lock at 1732255455773Running coprocessor pre-close hooks at 1732255455773Disabling compacts and flushes for region at 1732255455773Disabling writes for close at 1732255455774 (+1 ms)Writing region close event to WAL at 1732255455774Running coprocessor post-close hooks at 1732255455779 (+5 ms)Closed at 1732255455779 2024-11-22T06:04:15,781 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] handler.UnassignRegionHandler(157): Closed ef394ae7b5140734d6b528266ee80f5b 2024-11-22T06:04:15,781 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=121 updating hbase:meta row=ef394ae7b5140734d6b528266ee80f5b, regionState=CLOSED 2024-11-22T06:04:15,783 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=122, ppid=121, state=RUNNABLE, hasLock=false; CloseRegionProcedure ef394ae7b5140734d6b528266ee80f5b, server=407411ea95db,44619,1732255321573 because future has completed 2024-11-22T06:04:15,785 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=122, resume processing ppid=121 2024-11-22T06:04:15,785 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=122, ppid=121, state=SUCCESS, hasLock=false; CloseRegionProcedure ef394ae7b5140734d6b528266ee80f5b, server=407411ea95db,44619,1732255321573 in 162 msec 2024-11-22T06:04:15,787 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=121, resume processing ppid=120 2024-11-22T06:04:15,787 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=121, ppid=120, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=ef394ae7b5140734d6b528266ee80f5b, UNASSIGN in 168 msec 2024-11-22T06:04:15,789 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure 
pid=120, resume processing ppid=119 2024-11-22T06:04:15,789 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=120, ppid=119, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 171 msec 2024-11-22T06:04:15,790 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732255455790"}]},"ts":"1732255455790"} 2024-11-22T06:04:15,792 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=DISABLED in hbase:meta 2024-11-22T06:04:15,792 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithMergeRegion-1 to state=DISABLED 2024-11-22T06:04:15,794 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=119, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 184 msec 2024-11-22T06:04:15,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-11-22T06:04:15,930 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-11-22T06:04:15,930 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-22T06:04:15,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] procedure2.ProcedureExecutor(1139): Stored pid=123, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-22T06:04:15,932 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=123, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-22T06:04:15,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-22T06:04:15,933 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=123, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-22T06:04:15,935 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41799 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-22T06:04:15,936 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ef394ae7b5140734d6b528266ee80f5b 2024-11-22T06:04:15,936 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/dc79e43589f33117575a322062ec5651 2024-11-22T06:04:15,936 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(131): ARCHIVING 
hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/7cf525f0dcbe7849cf98c1a30246cf62 2024-11-22T06:04:15,937 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/7cf525f0dcbe7849cf98c1a30246cf62/cf, FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/7cf525f0dcbe7849cf98c1a30246cf62/recovered.edits] 2024-11-22T06:04:15,937 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ef394ae7b5140734d6b528266ee80f5b/cf, FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ef394ae7b5140734d6b528266ee80f5b/recovered.edits] 2024-11-22T06:04:15,937 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/dc79e43589f33117575a322062ec5651/cf, FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/dc79e43589f33117575a322062ec5651/recovered.edits] 2024-11-22T06:04:15,940 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ef394ae7b5140734d6b528266ee80f5b/cf/1d3635ced2fb4f2297defa16145b458e.dc79e43589f33117575a322062ec5651 to hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ef394ae7b5140734d6b528266ee80f5b/cf/1d3635ced2fb4f2297defa16145b458e.dc79e43589f33117575a322062ec5651 2024-11-22T06:04:15,940 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/7cf525f0dcbe7849cf98c1a30246cf62/cf/33efd3bebb814059bfcfd0319c39de3c to hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/7cf525f0dcbe7849cf98c1a30246cf62/cf/33efd3bebb814059bfcfd0319c39de3c 2024-11-22T06:04:15,940 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/dc79e43589f33117575a322062ec5651/cf/1d3635ced2fb4f2297defa16145b458e to hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/dc79e43589f33117575a322062ec5651/cf/1d3635ced2fb4f2297defa16145b458e 2024-11-22T06:04:15,941 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ef394ae7b5140734d6b528266ee80f5b/cf/33efd3bebb814059bfcfd0319c39de3c.7cf525f0dcbe7849cf98c1a30246cf62 to hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ef394ae7b5140734d6b528266ee80f5b/cf/33efd3bebb814059bfcfd0319c39de3c.7cf525f0dcbe7849cf98c1a30246cf62 2024-11-22T06:04:15,943 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/7cf525f0dcbe7849cf98c1a30246cf62/recovered.edits/8.seqid to hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/7cf525f0dcbe7849cf98c1a30246cf62/recovered.edits/8.seqid 2024-11-22T06:04:15,943 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/dc79e43589f33117575a322062ec5651/recovered.edits/8.seqid to hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/dc79e43589f33117575a322062ec5651/recovered.edits/8.seqid 2024-11-22T06:04:15,943 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/7cf525f0dcbe7849cf98c1a30246cf62 2024-11-22T06:04:15,943 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/dc79e43589f33117575a322062ec5651 2024-11-22T06:04:15,944 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ef394ae7b5140734d6b528266ee80f5b/recovered.edits/12.seqid to hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ef394ae7b5140734d6b528266ee80f5b/recovered.edits/12.seqid 2024-11-22T06:04:15,944 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ef394ae7b5140734d6b528266ee80f5b 2024-11-22T06:04:15,944 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithMergeRegion-1 regions 2024-11-22T06:04:15,946 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=123, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-22T06:04:15,948 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of testtb-testExportFileSystemStateWithMergeRegion-1 from hbase:meta 2024-11-22T06:04:15,977 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): regionserver:35283-0x101626c19a90003, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-22T06:04:15,977 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-22T06:04:15,977 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41799-0x101626c19a90002, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-22T06:04:15,977 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44619-0x101626c19a90001, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-22T06:04:15,978 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-11-22T06:04:15,978 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-11-22T06:04:15,978 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-11-22T06:04:15,978 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-11-22T06:04:15,979 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithMergeRegion-1' descriptor. 2024-11-22T06:04:15,980 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=123, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-22T06:04:15,980 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithMergeRegion-1' from region states. 2024-11-22T06:04:15,980 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1732255438221.ef394ae7b5140734d6b528266ee80f5b.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732255455980"}]},"ts":"9223372036854775807"} 2024-11-22T06:04:15,982 INFO [PEWorker-3 {}] assignment.RegionStateStore(562): Deleted 1 regions from META 2024-11-22T06:04:15,982 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => ef394ae7b5140734d6b528266ee80f5b, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1732255438221.ef394ae7b5140734d6b528266ee80f5b.', STARTKEY => '', ENDKEY => ''}] 2024-11-22T06:04:15,982 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithMergeRegion-1' as deleted. 
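
[Editor's note] The entries above trace the master-side DisableTableProcedure and DeleteTableProcedure chain for testtb-testExportFileSystemStateWithMergeRegion-1: regions are closed, their directories are moved to the archive by HFileArchiver, the rows are removed from hbase:meta, and the table's ACL znode is cleaned up. For orientation, a minimal client-side sketch that would drive this same sequence is shown below. It assumes a standard HBase 2.x blocking Admin API (the test itself goes through the async RawAsyncHBaseAdmin); the quorum setting is a placeholder.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Placeholder quorum; the mini cluster in this log runs ZooKeeper on 127.0.0.1:59716.
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1");
      // disableTable blocks until the master's DisableTableProcedure finishes
      // (the "Checking to see if procedure is done pid=..." lines are that wait).
      if (admin.isTableEnabled(tn)) {
        admin.disableTable(tn);
      }
      // deleteTable drives the DeleteTableProcedure: archive region directories,
      // delete rows from hbase:meta, drop the descriptor and the ACL entry.
      admin.deleteTable(tn);
    }
  }
}
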
2024-11-22T06:04:15,982 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732255455982"}]},"ts":"9223372036854775807"} 2024-11-22T06:04:15,983 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithMergeRegion-1 state from META 2024-11-22T06:04:15,984 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=123, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-22T06:04:15,985 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=123, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 54 msec 2024-11-22T06:04:15,985 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-22T06:04:15,985 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44619-0x101626c19a90001, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-22T06:04:15,985 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44619-0x101626c19a90001, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:04:15,985 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:04:15,985 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35283-0x101626c19a90003, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-22T06:04:15,985 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35283-0x101626c19a90003, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:04:15,985 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41799-0x101626c19a90002, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-22T06:04:15,986 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41799-0x101626c19a90002, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:04:15,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=123 2024-11-22T06:04:15,987 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-22T06:04:15,987 INFO [RPCClient-NioEventLoopGroup-6-4 {}] 
client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-11-22T06:04:15,987 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-22T06:04:15,987 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-22T06:04:15,987 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-22T06:04:15,987 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithMergeRegion 2024-11-22T06:04:15,987 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-22T06:04:15,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] procedure2.ProcedureExecutor(1139): Stored pid=124, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-22T06:04:15,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=124 2024-11-22T06:04:15,990 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732255455989"}]},"ts":"1732255455989"} 2024-11-22T06:04:15,991 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=DISABLING in hbase:meta 2024-11-22T06:04:15,991 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithMergeRegion to state=DISABLING 2024-11-22T06:04:15,992 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=125, ppid=124, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion}] 2024-11-22T06:04:15,993 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=126, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=22cbb4718a753f944b86f8974eee4ebe, UNASSIGN}, {pid=127, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=9c5d3a15b43735703c39879cab4ee61a, UNASSIGN}] 2024-11-22T06:04:15,994 INFO [PEWorker-4 {}] 
procedure.MasterProcedureScheduler(851): Took xlock for pid=126, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=22cbb4718a753f944b86f8974eee4ebe, UNASSIGN 2024-11-22T06:04:15,994 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=127, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=9c5d3a15b43735703c39879cab4ee61a, UNASSIGN 2024-11-22T06:04:15,994 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=126 updating hbase:meta row=22cbb4718a753f944b86f8974eee4ebe, regionState=CLOSING, regionLocation=407411ea95db,44619,1732255321573 2024-11-22T06:04:15,994 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=127 updating hbase:meta row=9c5d3a15b43735703c39879cab4ee61a, regionState=CLOSING, regionLocation=407411ea95db,35283,1732255321784 2024-11-22T06:04:15,996 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=126, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=22cbb4718a753f944b86f8974eee4ebe, UNASSIGN because future has completed 2024-11-22T06:04:15,996 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-22T06:04:15,996 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=128, ppid=126, state=RUNNABLE, hasLock=false; CloseRegionProcedure 22cbb4718a753f944b86f8974eee4ebe, server=407411ea95db,44619,1732255321573}] 2024-11-22T06:04:15,997 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40429 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=CLOSING, location=407411ea95db,35283,1732255321784, table=testtb-testExportFileSystemStateWithMergeRegion, region=9c5d3a15b43735703c39879cab4ee61a. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 
2024-11-22T06:04:15,997 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=127, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=9c5d3a15b43735703c39879cab4ee61a, UNASSIGN because future has completed 2024-11-22T06:04:15,997 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-22T06:04:15,997 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=129, ppid=127, state=RUNNABLE, hasLock=false; CloseRegionProcedure 9c5d3a15b43735703c39879cab4ee61a, server=407411ea95db,35283,1732255321784}] 2024-11-22T06:04:16,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=124 2024-11-22T06:04:16,149 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] handler.UnassignRegionHandler(122): Close 22cbb4718a753f944b86f8974eee4ebe 2024-11-22T06:04:16,149 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-22T06:04:16,149 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] regionserver.HRegion(1722): Closing 22cbb4718a753f944b86f8974eee4ebe, disabling compactions & flushes 2024-11-22T06:04:16,150 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,,1732255436325.22cbb4718a753f944b86f8974eee4ebe. 2024-11-22T06:04:16,150 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,,1732255436325.22cbb4718a753f944b86f8974eee4ebe. 2024-11-22T06:04:16,150 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,,1732255436325.22cbb4718a753f944b86f8974eee4ebe. after waiting 0 ms 2024-11-22T06:04:16,150 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,,1732255436325.22cbb4718a753f944b86f8974eee4ebe. 
2024-11-22T06:04:16,150 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(122): Close 9c5d3a15b43735703c39879cab4ee61a 2024-11-22T06:04:16,150 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-22T06:04:16,150 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1722): Closing 9c5d3a15b43735703c39879cab4ee61a, disabling compactions & flushes 2024-11-22T06:04:16,150 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,1,1732255436325.9c5d3a15b43735703c39879cab4ee61a. 2024-11-22T06:04:16,150 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1732255436325.9c5d3a15b43735703c39879cab4ee61a. 2024-11-22T06:04:16,151 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1732255436325.9c5d3a15b43735703c39879cab4ee61a. after waiting 0 ms 2024-11-22T06:04:16,151 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,1,1732255436325.9c5d3a15b43735703c39879cab4ee61a. 2024-11-22T06:04:16,156 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion/22cbb4718a753f944b86f8974eee4ebe/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-22T06:04:16,157 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-22T06:04:16,157 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,,1732255436325.22cbb4718a753f944b86f8974eee4ebe. 
2024-11-22T06:04:16,157 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] regionserver.HRegion(1676): Region close journal for 22cbb4718a753f944b86f8974eee4ebe: Waiting for close lock at 1732255456149Running coprocessor pre-close hooks at 1732255456149Disabling compacts and flushes for region at 1732255456149Disabling writes for close at 1732255456150 (+1 ms)Writing region close event to WAL at 1732255456151 (+1 ms)Running coprocessor post-close hooks at 1732255456157 (+6 ms)Closed at 1732255456157 2024-11-22T06:04:16,158 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion/9c5d3a15b43735703c39879cab4ee61a/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-22T06:04:16,158 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-22T06:04:16,159 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,1,1732255436325.9c5d3a15b43735703c39879cab4ee61a. 2024-11-22T06:04:16,159 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1676): Region close journal for 9c5d3a15b43735703c39879cab4ee61a: Waiting for close lock at 1732255456150Running coprocessor pre-close hooks at 1732255456150Disabling compacts and flushes for region at 1732255456150Disabling writes for close at 1732255456151 (+1 ms)Writing region close event to WAL at 1732255456152 (+1 ms)Running coprocessor post-close hooks at 1732255456158 (+6 ms)Closed at 1732255456158 2024-11-22T06:04:16,160 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] handler.UnassignRegionHandler(157): Closed 22cbb4718a753f944b86f8974eee4ebe 2024-11-22T06:04:16,161 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=126 updating hbase:meta row=22cbb4718a753f944b86f8974eee4ebe, regionState=CLOSED 2024-11-22T06:04:16,162 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(157): Closed 9c5d3a15b43735703c39879cab4ee61a 2024-11-22T06:04:16,162 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=127 updating hbase:meta row=9c5d3a15b43735703c39879cab4ee61a, regionState=CLOSED 2024-11-22T06:04:16,164 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=128, ppid=126, state=RUNNABLE, hasLock=false; CloseRegionProcedure 22cbb4718a753f944b86f8974eee4ebe, server=407411ea95db,44619,1732255321573 because future has completed 2024-11-22T06:04:16,165 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=129, ppid=127, state=RUNNABLE, hasLock=false; CloseRegionProcedure 9c5d3a15b43735703c39879cab4ee61a, server=407411ea95db,35283,1732255321784 because future has completed 2024-11-22T06:04:16,167 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=128, resume processing ppid=126 2024-11-22T06:04:16,167 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=128, ppid=126, 
state=SUCCESS, hasLock=false; CloseRegionProcedure 22cbb4718a753f944b86f8974eee4ebe, server=407411ea95db,44619,1732255321573 in 169 msec 2024-11-22T06:04:16,168 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=126, ppid=125, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=22cbb4718a753f944b86f8974eee4ebe, UNASSIGN in 174 msec 2024-11-22T06:04:16,168 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=129, resume processing ppid=127 2024-11-22T06:04:16,168 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=129, ppid=127, state=SUCCESS, hasLock=false; CloseRegionProcedure 9c5d3a15b43735703c39879cab4ee61a, server=407411ea95db,35283,1732255321784 in 169 msec 2024-11-22T06:04:16,170 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=127, resume processing ppid=125 2024-11-22T06:04:16,170 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=127, ppid=125, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=9c5d3a15b43735703c39879cab4ee61a, UNASSIGN in 175 msec 2024-11-22T06:04:16,171 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=125, resume processing ppid=124 2024-11-22T06:04:16,171 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=125, ppid=124, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 178 msec 2024-11-22T06:04:16,173 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732255456172"}]},"ts":"1732255456172"} 2024-11-22T06:04:16,174 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=DISABLED in hbase:meta 2024-11-22T06:04:16,174 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithMergeRegion to state=DISABLED 2024-11-22T06:04:16,176 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=124, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 188 msec 2024-11-22T06:04:16,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=124 2024-11-22T06:04:16,309 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-11-22T06:04:16,309 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithMergeRegion 2024-11-22T06:04:16,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] procedure2.ProcedureExecutor(1139): Stored pid=130, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-22T06:04:16,311 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=130, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-22T06:04:16,311 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithMergeRegion 2024-11-22T06:04:16,312 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=130, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-22T06:04:16,314 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41799 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithMergeRegion 2024-11-22T06:04:16,315 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion/22cbb4718a753f944b86f8974eee4ebe 2024-11-22T06:04:16,315 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion/9c5d3a15b43735703c39879cab4ee61a 2024-11-22T06:04:16,317 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion/9c5d3a15b43735703c39879cab4ee61a/cf, FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion/9c5d3a15b43735703c39879cab4ee61a/recovered.edits] 2024-11-22T06:04:16,317 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion/22cbb4718a753f944b86f8974eee4ebe/cf, FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion/22cbb4718a753f944b86f8974eee4ebe/recovered.edits] 2024-11-22T06:04:16,320 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion/9c5d3a15b43735703c39879cab4ee61a/cf/022d71d7754f4b2cb4336a81a04f7560 to hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/9c5d3a15b43735703c39879cab4ee61a/cf/022d71d7754f4b2cb4336a81a04f7560 2024-11-22T06:04:16,320 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion/22cbb4718a753f944b86f8974eee4ebe/cf/48bae1a3c05642bcb4bd7029582c9122 to hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/22cbb4718a753f944b86f8974eee4ebe/cf/48bae1a3c05642bcb4bd7029582c9122 2024-11-22T06:04:16,323 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion/9c5d3a15b43735703c39879cab4ee61a/recovered.edits/9.seqid to hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/9c5d3a15b43735703c39879cab4ee61a/recovered.edits/9.seqid 2024-11-22T06:04:16,323 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion/22cbb4718a753f944b86f8974eee4ebe/recovered.edits/9.seqid to hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/22cbb4718a753f944b86f8974eee4ebe/recovered.edits/9.seqid 2024-11-22T06:04:16,323 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion/9c5d3a15b43735703c39879cab4ee61a 2024-11-22T06:04:16,323 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithMergeRegion/22cbb4718a753f944b86f8974eee4ebe 2024-11-22T06:04:16,323 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithMergeRegion regions 2024-11-22T06:04:16,325 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=130, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-22T06:04:16,328 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithMergeRegion from hbase:meta 2024-11-22T06:04:16,352 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35283-0x101626c19a90003, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-22T06:04:16,352 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41799-0x101626c19a90002, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-22T06:04:16,352 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-22T06:04:16,352 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44619-0x101626c19a90001, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-22T06:04:16,353 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-11-22T06:04:16,353 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating 
permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-11-22T06:04:16,354 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-11-22T06:04:16,354 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-11-22T06:04:16,355 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithMergeRegion' descriptor. 2024-11-22T06:04:16,357 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=130, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-22T06:04:16,357 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithMergeRegion' from region states. 2024-11-22T06:04:16,357 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion,,1732255436325.22cbb4718a753f944b86f8974eee4ebe.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732255456357"}]},"ts":"9223372036854775807"} 2024-11-22T06:04:16,357 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion,1,1732255436325.9c5d3a15b43735703c39879cab4ee61a.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732255456357"}]},"ts":"9223372036854775807"} 2024-11-22T06:04:16,360 INFO [PEWorker-4 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-22T06:04:16,360 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 22cbb4718a753f944b86f8974eee4ebe, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1732255436325.22cbb4718a753f944b86f8974eee4ebe.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 9c5d3a15b43735703c39879cab4ee61a, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1732255436325.9c5d3a15b43735703c39879cab4ee61a.', STARTKEY => '1', ENDKEY => ''}] 2024-11-22T06:04:16,360 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithMergeRegion' as deleted. 
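
[Editor's note] The bursts of ZKWatcher events in this section (NodeDataChanged, then NodeDeleted and NodeChildrenChanged under /hbase/acl) show how ACL cleanup propagates: the master mutates the table's ACL znode and each region server's permission watcher refreshes its cache. The sketch below watches that znode with the plain ZooKeeper client; it is illustrative only, not HBase's ZKPermissionWatcher, and the connect string and timeout are placeholders.

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class AclNodeWatchExample {
  public static void main(String[] args) throws Exception {
    // Placeholder connect string; the test cluster above uses 127.0.0.1:59716.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, event -> {});
    Watcher watcher = (WatchedEvent event) ->
        System.out.println("event=" + event.getType() + " path=" + event.getPath());
    // Register a one-shot watch on the children of /hbase/acl; a real watcher
    // would re-register after every notification, which is omitted here.
    zk.getChildren("/hbase/acl", watcher);
    Thread.sleep(60_000); // keep the process alive long enough to observe events
    zk.close();
  }
}
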
2024-11-22T06:04:16,360 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732255456360"}]},"ts":"9223372036854775807"} 2024-11-22T06:04:16,362 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithMergeRegion state from META 2024-11-22T06:04:16,363 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=130, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-22T06:04:16,364 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44619-0x101626c19a90001, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-22T06:04:16,364 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-22T06:04:16,364 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35283-0x101626c19a90003, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-22T06:04:16,364 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:04:16,364 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35283-0x101626c19a90003, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:04:16,364 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44619-0x101626c19a90001, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:04:16,364 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41799-0x101626c19a90002, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-22T06:04:16,364 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41799-0x101626c19a90002, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:04:16,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=130 2024-11-22T06:04:16,365 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=130, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 53 msec 2024-11-22T06:04:16,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=130 2024-11-22T06:04:16,471 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for 
testtb-testExportFileSystemStateWithMergeRegion 2024-11-22T06:04:16,471 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-11-22T06:04:16,485 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithMergeRegion" type: DISABLED 2024-11-22T06:04:16,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-22T06:04:16,489 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithMergeRegion" type: DISABLED 2024-11-22T06:04:16,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-22T06:04:16,493 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithMergeRegion-1" type: DISABLED 2024-11-22T06:04:16,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-22T06:04:16,519 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithMergeRegion Thread=802 (was 790) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2145612548_22 at /127.0.0.1:44348 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/407411ea95db:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/407411ea95db:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2145612548_22 at /127.0.0.1:54916 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42041 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33903 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 126406) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/407411ea95db:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-14 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-13 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-15 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_180320438_1 at /127.0.0.1:44322 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-4511 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) 
app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1113730973) connection to localhost/127.0.0.1:42041 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2145612548_22 at /127.0.0.1:51920 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=802 (was 799) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=558 (was 614), ProcessCount=16 (was 16), AvailableMemoryMB=3929 (was 4035) 2024-11-22T06:04:16,519 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=802 is superior to 500 2024-11-22T06:04:16,534 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportExpiredSnapshot Thread=802, OpenFileDescriptor=802, MaxFileDescriptor=1048576, SystemLoadAverage=558, ProcessCount=16, AvailableMemoryMB=3929 2024-11-22T06:04:16,534 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=802 is superior to 500 2024-11-22T06:04:16,535 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T06:04:16,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] procedure2.ProcedureExecutor(1139): Stored pid=131, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-22T06:04:16,537 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=131, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_PRE_OPERATION 2024-11-22T06:04:16,537 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T06:04:16,537 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportExpiredSnapshot" procId is: 131 2024-11-22T06:04:16,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=131 2024-11-22T06:04:16,538 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=131, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-22T06:04:16,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742144_1320 (size=407) 2024-11-22T06:04:16,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742144_1320 (size=407) 2024-11-22T06:04:16,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742144_1320 (size=407) 2024-11-22T06:04:16,550 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => bade74021296d5cb036873466d5300bd, NAME => 'testtb-testExportExpiredSnapshot,,1732255456535.bade74021296d5cb036873466d5300bd.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => 
{'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788 2024-11-22T06:04:16,550 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 741a7efeeb2abbf2cefdb07f109c09a8, NAME => 'testtb-testExportExpiredSnapshot,1,1732255456535.741a7efeeb2abbf2cefdb07f109c09a8.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788 2024-11-22T06:04:16,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742146_1322 (size=68) 2024-11-22T06:04:16,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742146_1322 (size=68) 2024-11-22T06:04:16,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742145_1321 (size=68) 2024-11-22T06:04:16,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742146_1322 (size=68) 2024-11-22T06:04:16,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742145_1321 (size=68) 2024-11-22T06:04:16,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742145_1321 (size=68) 2024-11-22T06:04:16,559 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,,1732255456535.bade74021296d5cb036873466d5300bd.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T06:04:16,559 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,1,1732255456535.741a7efeeb2abbf2cefdb07f109c09a8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T06:04:16,559 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1722): Closing bade74021296d5cb036873466d5300bd, disabling compactions & flushes 2024-11-22T06:04:16,559 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1722): Closing 741a7efeeb2abbf2cefdb07f109c09a8, disabling compactions & flushes 2024-11-22T06:04:16,559 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] 
regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,,1732255456535.bade74021296d5cb036873466d5300bd. 2024-11-22T06:04:16,559 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,,1732255456535.bade74021296d5cb036873466d5300bd. 2024-11-22T06:04:16,559 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,1,1732255456535.741a7efeeb2abbf2cefdb07f109c09a8. 2024-11-22T06:04:16,559 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,,1732255456535.bade74021296d5cb036873466d5300bd. after waiting 0 ms 2024-11-22T06:04:16,559 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,1,1732255456535.741a7efeeb2abbf2cefdb07f109c09a8. 2024-11-22T06:04:16,559 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,,1732255456535.bade74021296d5cb036873466d5300bd. 2024-11-22T06:04:16,559 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,1,1732255456535.741a7efeeb2abbf2cefdb07f109c09a8. after waiting 0 ms 2024-11-22T06:04:16,559 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,,1732255456535.bade74021296d5cb036873466d5300bd. 2024-11-22T06:04:16,559 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,1,1732255456535.741a7efeeb2abbf2cefdb07f109c09a8. 2024-11-22T06:04:16,559 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1676): Region close journal for bade74021296d5cb036873466d5300bd: Waiting for close lock at 1732255456559Disabling compacts and flushes for region at 1732255456559Disabling writes for close at 1732255456559Writing region close event to WAL at 1732255456559Closed at 1732255456559 2024-11-22T06:04:16,559 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,1,1732255456535.741a7efeeb2abbf2cefdb07f109c09a8. 
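The entries above record the create-table request for 'testtb-testExportExpiredSnapshot' (one column family 'cf' with VERSIONS => '1', REGION_REPLICATION => '1') and the initialization of its two regions, split at row key '1'. For reference, a client could issue an equivalent request through the HBase Java Admin API roughly as sketched below; the connection setup is an assumption and the schema values are taken from the descriptor logged above, not from any test source code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateExportTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();  // assumes hbase-site.xml on the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("testtb-testExportExpiredSnapshot");
          // One column family 'cf' keeping a single version, as in the logged descriptor.
          TableDescriptorBuilder td = TableDescriptorBuilder.newBuilder(table)
              .setRegionReplication(1)
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                  .setMaxVersions(1)
                  .build());
          // A single split key of '1' yields the two regions seen above: ('', '1') and ('1', '').
          byte[][] splits = new byte[][] { Bytes.toBytes("1") };
          admin.createTable(td.build(), splits);
        }
      }
    }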
2024-11-22T06:04:16,559 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1676): Region close journal for 741a7efeeb2abbf2cefdb07f109c09a8: Waiting for close lock at 1732255456559Disabling compacts and flushes for region at 1732255456559Disabling writes for close at 1732255456559Writing region close event to WAL at 1732255456559Closed at 1732255456559 2024-11-22T06:04:16,560 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=131, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_ADD_TO_META 2024-11-22T06:04:16,560 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportExpiredSnapshot,,1732255456535.bade74021296d5cb036873466d5300bd.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1732255456560"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732255456560"}]},"ts":"1732255456560"} 2024-11-22T06:04:16,560 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportExpiredSnapshot,1,1732255456535.741a7efeeb2abbf2cefdb07f109c09a8.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1732255456560"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732255456560"}]},"ts":"1732255456560"} 2024-11-22T06:04:16,562 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-22T06:04:16,563 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=131, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-22T06:04:16,563 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732255456563"}]},"ts":"1732255456563"} 2024-11-22T06:04:16,565 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=ENABLING in hbase:meta 2024-11-22T06:04:16,565 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(204): Hosts are {407411ea95db=0} racks are {/default-rack=0} 2024-11-22T06:04:16,566 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-22T06:04:16,566 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-22T06:04:16,566 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-22T06:04:16,566 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-22T06:04:16,566 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-22T06:04:16,566 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-22T06:04:16,566 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-22T06:04:16,566 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-22T06:04:16,566 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-22T06:04:16,566 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-22T06:04:16,567 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=132, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, 
hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=bade74021296d5cb036873466d5300bd, ASSIGN}, {pid=133, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=741a7efeeb2abbf2cefdb07f109c09a8, ASSIGN}] 2024-11-22T06:04:16,568 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=133, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=741a7efeeb2abbf2cefdb07f109c09a8, ASSIGN 2024-11-22T06:04:16,568 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=132, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=bade74021296d5cb036873466d5300bd, ASSIGN 2024-11-22T06:04:16,568 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=132, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=bade74021296d5cb036873466d5300bd, ASSIGN; state=OFFLINE, location=407411ea95db,44619,1732255321573; forceNewPlan=false, retain=false 2024-11-22T06:04:16,568 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=133, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=741a7efeeb2abbf2cefdb07f109c09a8, ASSIGN; state=OFFLINE, location=407411ea95db,35283,1732255321784; forceNewPlan=false, retain=false 2024-11-22T06:04:16,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=131 2024-11-22T06:04:16,719 INFO [407411ea95db:40429 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-11-22T06:04:16,719 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=132 updating hbase:meta row=bade74021296d5cb036873466d5300bd, regionState=OPENING, regionLocation=407411ea95db,44619,1732255321573 2024-11-22T06:04:16,719 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=133 updating hbase:meta row=741a7efeeb2abbf2cefdb07f109c09a8, regionState=OPENING, regionLocation=407411ea95db,35283,1732255321784 2024-11-22T06:04:16,721 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=132, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=bade74021296d5cb036873466d5300bd, ASSIGN because future has completed 2024-11-22T06:04:16,721 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=134, ppid=132, state=RUNNABLE, hasLock=false; OpenRegionProcedure bade74021296d5cb036873466d5300bd, server=407411ea95db,44619,1732255321573}] 2024-11-22T06:04:16,722 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=133, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=741a7efeeb2abbf2cefdb07f109c09a8, ASSIGN because future has completed 2024-11-22T06:04:16,722 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=135, ppid=133, state=RUNNABLE, hasLock=false; OpenRegionProcedure 741a7efeeb2abbf2cefdb07f109c09a8, server=407411ea95db,35283,1732255321784}] 2024-11-22T06:04:16,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=131 2024-11-22T06:04:16,881 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] handler.AssignRegionHandler(132): Open testtb-testExportExpiredSnapshot,,1732255456535.bade74021296d5cb036873466d5300bd. 2024-11-22T06:04:16,881 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(7752): Opening region: {ENCODED => bade74021296d5cb036873466d5300bd, NAME => 'testtb-testExportExpiredSnapshot,,1732255456535.bade74021296d5cb036873466d5300bd.', STARTKEY => '', ENDKEY => '1'} 2024-11-22T06:04:16,881 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] handler.AssignRegionHandler(132): Open testtb-testExportExpiredSnapshot,1,1732255456535.741a7efeeb2abbf2cefdb07f109c09a8. 2024-11-22T06:04:16,882 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7752): Opening region: {ENCODED => 741a7efeeb2abbf2cefdb07f109c09a8, NAME => 'testtb-testExportExpiredSnapshot,1,1732255456535.741a7efeeb2abbf2cefdb07f109c09a8.', STARTKEY => '1', ENDKEY => ''} 2024-11-22T06:04:16,882 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportExpiredSnapshot,,1732255456535.bade74021296d5cb036873466d5300bd. service=AccessControlService 2024-11-22T06:04:16,882 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportExpiredSnapshot,1,1732255456535.741a7efeeb2abbf2cefdb07f109c09a8. 
service=AccessControlService 2024-11-22T06:04:16,882 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-22T06:04:16,882 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-22T06:04:16,882 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportExpiredSnapshot 741a7efeeb2abbf2cefdb07f109c09a8 2024-11-22T06:04:16,882 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportExpiredSnapshot bade74021296d5cb036873466d5300bd 2024-11-22T06:04:16,883 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,1,1732255456535.741a7efeeb2abbf2cefdb07f109c09a8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T06:04:16,883 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,,1732255456535.bade74021296d5cb036873466d5300bd.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T06:04:16,883 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7794): checking encryption for 741a7efeeb2abbf2cefdb07f109c09a8 2024-11-22T06:04:16,883 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7797): checking classloading for 741a7efeeb2abbf2cefdb07f109c09a8 2024-11-22T06:04:16,883 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(7794): checking encryption for bade74021296d5cb036873466d5300bd 2024-11-22T06:04:16,883 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(7797): checking classloading for bade74021296d5cb036873466d5300bd 2024-11-22T06:04:16,884 INFO [StoreOpener-741a7efeeb2abbf2cefdb07f109c09a8-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 741a7efeeb2abbf2cefdb07f109c09a8 2024-11-22T06:04:16,884 INFO [StoreOpener-bade74021296d5cb036873466d5300bd-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region bade74021296d5cb036873466d5300bd 2024-11-22T06:04:16,885 INFO [StoreOpener-741a7efeeb2abbf2cefdb07f109c09a8-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); 
files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 741a7efeeb2abbf2cefdb07f109c09a8 columnFamilyName cf 2024-11-22T06:04:16,885 INFO [StoreOpener-bade74021296d5cb036873466d5300bd-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region bade74021296d5cb036873466d5300bd columnFamilyName cf 2024-11-22T06:04:16,885 DEBUG [StoreOpener-bade74021296d5cb036873466d5300bd-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T06:04:16,885 DEBUG [StoreOpener-741a7efeeb2abbf2cefdb07f109c09a8-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T06:04:16,886 INFO [StoreOpener-741a7efeeb2abbf2cefdb07f109c09a8-1 {}] regionserver.HStore(327): Store=741a7efeeb2abbf2cefdb07f109c09a8/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T06:04:16,886 INFO [StoreOpener-bade74021296d5cb036873466d5300bd-1 {}] regionserver.HStore(327): Store=bade74021296d5cb036873466d5300bd/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T06:04:16,886 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1038): replaying wal for 741a7efeeb2abbf2cefdb07f109c09a8 2024-11-22T06:04:16,886 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(1038): replaying wal for bade74021296d5cb036873466d5300bd 2024-11-22T06:04:16,887 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportExpiredSnapshot/741a7efeeb2abbf2cefdb07f109c09a8 2024-11-22T06:04:16,887 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportExpiredSnapshot/bade74021296d5cb036873466d5300bd 2024-11-22T06:04:16,887 DEBUG 
[RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportExpiredSnapshot/741a7efeeb2abbf2cefdb07f109c09a8 2024-11-22T06:04:16,887 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportExpiredSnapshot/bade74021296d5cb036873466d5300bd 2024-11-22T06:04:16,887 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1048): stopping wal replay for 741a7efeeb2abbf2cefdb07f109c09a8 2024-11-22T06:04:16,887 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1060): Cleaning up temporary data for 741a7efeeb2abbf2cefdb07f109c09a8 2024-11-22T06:04:16,887 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(1048): stopping wal replay for bade74021296d5cb036873466d5300bd 2024-11-22T06:04:16,888 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(1060): Cleaning up temporary data for bade74021296d5cb036873466d5300bd 2024-11-22T06:04:16,889 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1093): writing seq id for 741a7efeeb2abbf2cefdb07f109c09a8 2024-11-22T06:04:16,889 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(1093): writing seq id for bade74021296d5cb036873466d5300bd 2024-11-22T06:04:16,891 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportExpiredSnapshot/741a7efeeb2abbf2cefdb07f109c09a8/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T06:04:16,891 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportExpiredSnapshot/bade74021296d5cb036873466d5300bd/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T06:04:16,891 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1114): Opened 741a7efeeb2abbf2cefdb07f109c09a8; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62394123, jitterRate=-0.07025511562824249}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-22T06:04:16,891 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 741a7efeeb2abbf2cefdb07f109c09a8 2024-11-22T06:04:16,891 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(1114): Opened bade74021296d5cb036873466d5300bd; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, 
ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73677561, jitterRate=0.09788121283054352}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-22T06:04:16,892 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(1122): Running coprocessor post-open hooks for bade74021296d5cb036873466d5300bd 2024-11-22T06:04:16,892 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1006): Region open journal for 741a7efeeb2abbf2cefdb07f109c09a8: Running coprocessor pre-open hook at 1732255456883Writing region info on filesystem at 1732255456883Initializing all the Stores at 1732255456884 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732255456884Cleaning up temporary data from old regions at 1732255456887 (+3 ms)Running coprocessor post-open hooks at 1732255456892 (+5 ms)Region opened successfully at 1732255456892 2024-11-22T06:04:16,892 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(1006): Region open journal for bade74021296d5cb036873466d5300bd: Running coprocessor pre-open hook at 1732255456883Writing region info on filesystem at 1732255456883Initializing all the Stores at 1732255456884 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732255456884Cleaning up temporary data from old regions at 1732255456888 (+4 ms)Running coprocessor post-open hooks at 1732255456892 (+4 ms)Region opened successfully at 1732255456892 2024-11-22T06:04:16,893 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportExpiredSnapshot,1,1732255456535.741a7efeeb2abbf2cefdb07f109c09a8., pid=135, masterSystemTime=1732255456873 2024-11-22T06:04:16,893 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportExpiredSnapshot,,1732255456535.bade74021296d5cb036873466d5300bd., pid=134, masterSystemTime=1732255456873 2024-11-22T06:04:16,895 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportExpiredSnapshot,,1732255456535.bade74021296d5cb036873466d5300bd. 2024-11-22T06:04:16,895 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] handler.AssignRegionHandler(153): Opened testtb-testExportExpiredSnapshot,,1732255456535.bade74021296d5cb036873466d5300bd. 
2024-11-22T06:04:16,896 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=132 updating hbase:meta row=bade74021296d5cb036873466d5300bd, regionState=OPEN, openSeqNum=2, regionLocation=407411ea95db,44619,1732255321573 2024-11-22T06:04:16,896 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportExpiredSnapshot,1,1732255456535.741a7efeeb2abbf2cefdb07f109c09a8. 2024-11-22T06:04:16,896 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] handler.AssignRegionHandler(153): Opened testtb-testExportExpiredSnapshot,1,1732255456535.741a7efeeb2abbf2cefdb07f109c09a8. 2024-11-22T06:04:16,896 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=133 updating hbase:meta row=741a7efeeb2abbf2cefdb07f109c09a8, regionState=OPEN, openSeqNum=2, regionLocation=407411ea95db,35283,1732255321784 2024-11-22T06:04:16,897 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=134, ppid=132, state=RUNNABLE, hasLock=false; OpenRegionProcedure bade74021296d5cb036873466d5300bd, server=407411ea95db,44619,1732255321573 because future has completed 2024-11-22T06:04:16,898 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=135, ppid=133, state=RUNNABLE, hasLock=false; OpenRegionProcedure 741a7efeeb2abbf2cefdb07f109c09a8, server=407411ea95db,35283,1732255321784 because future has completed 2024-11-22T06:04:16,900 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=134, resume processing ppid=132 2024-11-22T06:04:16,900 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=135, resume processing ppid=133 2024-11-22T06:04:16,900 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=134, ppid=132, state=SUCCESS, hasLock=false; OpenRegionProcedure bade74021296d5cb036873466d5300bd, server=407411ea95db,44619,1732255321573 in 177 msec 2024-11-22T06:04:16,900 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=135, ppid=133, state=SUCCESS, hasLock=false; OpenRegionProcedure 741a7efeeb2abbf2cefdb07f109c09a8, server=407411ea95db,35283,1732255321784 in 177 msec 2024-11-22T06:04:16,901 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=132, ppid=131, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=bade74021296d5cb036873466d5300bd, ASSIGN in 333 msec 2024-11-22T06:04:16,901 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=133, resume processing ppid=131 2024-11-22T06:04:16,901 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=133, ppid=131, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=741a7efeeb2abbf2cefdb07f109c09a8, ASSIGN in 333 msec 2024-11-22T06:04:16,902 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=131, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-22T06:04:16,902 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732255456902"}]},"ts":"1732255456902"} 
2024-11-22T06:04:16,904 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=ENABLED in hbase:meta 2024-11-22T06:04:16,904 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=131, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_POST_OPERATION 2024-11-22T06:04:16,905 DEBUG [PEWorker-3 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportExpiredSnapshot jenkins: RWXCA 2024-11-22T06:04:16,907 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41799 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-11-22T06:04:16,947 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44619-0x101626c19a90001, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:04:16,947 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35283-0x101626c19a90003, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:04:16,947 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41799-0x101626c19a90002, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:04:16,947 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:04:16,956 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-22T06:04:16,956 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-22T06:04:16,956 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-22T06:04:16,956 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-22T06:04:16,958 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=131, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportExpiredSnapshot in 421 msec 2024-11-22T06:04:17,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=131 2024-11-22T06:04:17,170 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportExpiredSnapshot completed 
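With CreateTableProcedure pid=131 finished, the test next waits (per the entries that follow) for every region of the table to be assigned before snapshotting. Outside the test harness, a comparable readiness check can be done by polling Admin.isTableAvailable; the helper below is an illustrative sketch, not code from this run, and the 200 ms back-off is an arbitrary choice.

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    public final class TableReadiness {
      // Polls until the table reports as available (all regions assigned) or the deadline passes.
      public static void await(Admin admin, TableName table, long timeoutMs) throws Exception {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (!admin.isTableAvailable(table)) {
          if (System.currentTimeMillis() > deadline) {
            throw new IllegalStateException(table + " not available after " + timeoutMs + " ms");
          }
          TimeUnit.MILLISECONDS.sleep(200);  // brief back-off between checks
        }
      }
    }

The 60000 ms timeout used by the harness below would correspond to await(admin, TableName.valueOf("testtb-testExportExpiredSnapshot"), 60000).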
2024-11-22T06:04:17,170 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportExpiredSnapshot get assigned. Timeout = 60000ms 2024-11-22T06:04:17,170 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-22T06:04:17,177 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportExpiredSnapshot assigned to meta. Checking AM states. 2024-11-22T06:04:17,177 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-22T06:04:17,177 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportExpiredSnapshot assigned. 2024-11-22T06:04:17,177 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-22T06:04:17,181 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-11-22T06:04:17,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732255457181 (current time:1732255457181). 2024-11-22T06:04:17,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-22T06:04:17,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-11-22T06:04:17,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-22T06:04:17,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@12bdfa82, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:04:17,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.ClusterIdFetcher(90): Going to request 407411ea95db,40429,-1 for getting cluster id 2024-11-22T06:04:17,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T06:04:17,183 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '6eeb2921-6f20-4de7-bcc0-81ebecd4007b' 2024-11-22T06:04:17,183 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T06:04:17,183 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "6eeb2921-6f20-4de7-bcc0-81ebecd4007b" 2024-11-22T06:04:17,184 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@eb31bc1, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:04:17,184 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [407411ea95db,40429,-1] 2024-11-22T06:04:17,184 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T06:04:17,184 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:04:17,185 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38698, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T06:04:17,186 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@751422d5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:04:17,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T06:04:17,187 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=407411ea95db,41799,1732255321718, seqNum=-1] 2024-11-22T06:04:17,187 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T06:04:17,188 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37986, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T06:04:17,189 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429. 
2024-11-22T06:04:17,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-22T06:04:17,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:04:17,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:04:17,190 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-22T06:04:17,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2ec5673a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:04:17,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.ClusterIdFetcher(90): Going to request 407411ea95db,40429,-1 for getting cluster id 2024-11-22T06:04:17,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T06:04:17,191 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '6eeb2921-6f20-4de7-bcc0-81ebecd4007b' 2024-11-22T06:04:17,192 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T06:04:17,192 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "6eeb2921-6f20-4de7-bcc0-81ebecd4007b" 2024-11-22T06:04:17,192 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@cfa48a5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:04:17,192 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [407411ea95db,40429,-1] 2024-11-22T06:04:17,192 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T06:04:17,193 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:04:17,193 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38712, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T06:04:17,194 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7bfc5e32, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:04:17,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T06:04:17,195 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=407411ea95db,41799,1732255321718, seqNum=-1] 2024-11-22T06:04:17,196 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T06:04:17,197 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37990, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T06:04:17,198 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1732255324607.c2a1af531af5dfa747044be7409c1d27., hostname=407411ea95db,41799,1732255321718, seqNum=2] 2024-11-22T06:04:17,199 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429. 
2024-11-22T06:04:17,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor220.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-22T06:04:17,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:04:17,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:04:17,199 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-22T06:04:17,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-11-22T06:04:17,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
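The block above shows the master validating the snapshot request: the call stacks come from SnapshotDescriptionUtils.validate (isSecurityAvailable, writeAclToSnapshotDescription), the table ACL "jenkins: RWXCA" is read from hbase:acl, and the SnapshotProcedure is stored immediately below as pid=136. As a point of reference, a hedged sketch of the client call that typically triggers this path; the snapshot and table names are taken from the log, the wrapper class is illustrative:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class TakeSnapshotSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // A FLUSH-type snapshot, matching the "type=FLUSH ttl=0" descriptors in the log.
          admin.snapshot("emptySnaptb0-testExportExpiredSnapshot",
              TableName.valueOf("testtb-testExportExpiredSnapshot"),
              SnapshotType.FLUSH);
        }
      }
    }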
2024-11-22T06:04:17,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] procedure2.ProcedureExecutor(1139): Stored pid=136, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-11-22T06:04:17,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 136 2024-11-22T06:04:17,201 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-22T06:04:17,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=136 2024-11-22T06:04:17,202 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-22T06:04:17,204 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-22T06:04:17,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742147_1323 (size=170) 2024-11-22T06:04:17,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742147_1323 (size=170) 2024-11-22T06:04:17,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742147_1323 (size=170) 2024-11-22T06:04:17,211 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-22T06:04:17,211 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=137, ppid=136, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure bade74021296d5cb036873466d5300bd}, {pid=138, ppid=136, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 741a7efeeb2abbf2cefdb07f109c09a8}] 2024-11-22T06:04:17,212 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=137, ppid=136, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure bade74021296d5cb036873466d5300bd 2024-11-22T06:04:17,212 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=138, ppid=136, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 741a7efeeb2abbf2cefdb07f109c09a8 2024-11-22T06:04:17,310 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=136 2024-11-22T06:04:17,363 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44619 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=137 2024-11-22T06:04:17,364 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=137}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,,1732255456535.bade74021296d5cb036873466d5300bd. 2024-11-22T06:04:17,364 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35283 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=138 2024-11-22T06:04:17,364 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=137}] regionserver.HRegion(2603): Flush status journal for bade74021296d5cb036873466d5300bd: 2024-11-22T06:04:17,364 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,1,1732255456535.741a7efeeb2abbf2cefdb07f109c09a8. 2024-11-22T06:04:17,365 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=137}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,,1732255456535.bade74021296d5cb036873466d5300bd. for emptySnaptb0-testExportExpiredSnapshot completed. 2024-11-22T06:04:17,365 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.HRegion(2603): Flush status journal for 741a7efeeb2abbf2cefdb07f109c09a8: 2024-11-22T06:04:17,365 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,1,1732255456535.741a7efeeb2abbf2cefdb07f109c09a8. for emptySnaptb0-testExportExpiredSnapshot completed. 2024-11-22T06:04:17,365 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=137}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,,1732255456535.bade74021296d5cb036873466d5300bd.' region-info for snapshot=emptySnaptb0-testExportExpiredSnapshot 2024-11-22T06:04:17,365 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=137}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-22T06:04:17,365 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=137}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-22T06:04:17,365 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,1,1732255456535.741a7efeeb2abbf2cefdb07f109c09a8.' 
region-info for snapshot=emptySnaptb0-testExportExpiredSnapshot 2024-11-22T06:04:17,365 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-22T06:04:17,365 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-22T06:04:17,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742148_1324 (size=71) 2024-11-22T06:04:17,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742148_1324 (size=71) 2024-11-22T06:04:17,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742148_1324 (size=71) 2024-11-22T06:04:17,375 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,1,1732255456535.741a7efeeb2abbf2cefdb07f109c09a8. 2024-11-22T06:04:17,375 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=138 2024-11-22T06:04:17,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.HMaster(4169): Remote procedure done, pid=138 2024-11-22T06:04:17,375 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportExpiredSnapshot on region 741a7efeeb2abbf2cefdb07f109c09a8 2024-11-22T06:04:17,376 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=138, ppid=136, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 741a7efeeb2abbf2cefdb07f109c09a8 2024-11-22T06:04:17,377 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=138, ppid=136, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 741a7efeeb2abbf2cefdb07f109c09a8 in 165 msec 2024-11-22T06:04:17,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742149_1325 (size=71) 2024-11-22T06:04:17,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742149_1325 (size=71) 2024-11-22T06:04:17,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742149_1325 (size=71) 2024-11-22T06:04:17,382 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=137}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,,1732255456535.bade74021296d5cb036873466d5300bd. 
2024-11-22T06:04:17,382 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=137 2024-11-22T06:04:17,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.HMaster(4169): Remote procedure done, pid=137 2024-11-22T06:04:17,383 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportExpiredSnapshot on region bade74021296d5cb036873466d5300bd 2024-11-22T06:04:17,383 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=137, ppid=136, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure bade74021296d5cb036873466d5300bd 2024-11-22T06:04:17,385 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=137, resume processing ppid=136 2024-11-22T06:04:17,385 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=137, ppid=136, state=SUCCESS, hasLock=false; SnapshotRegionProcedure bade74021296d5cb036873466d5300bd in 173 msec 2024-11-22T06:04:17,385 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-22T06:04:17,386 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-22T06:04:17,387 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-22T06:04:17,387 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportExpiredSnapshot 2024-11-22T06:04:17,387 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/.tmp/emptySnaptb0-testExportExpiredSnapshot 2024-11-22T06:04:17,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742150_1326 (size=552) 2024-11-22T06:04:17,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742150_1326 (size=552) 2024-11-22T06:04:17,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742150_1326 (size=552) 2024-11-22T06:04:17,396 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute 
state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-22T06:04:17,400 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-22T06:04:17,401 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/.tmp/emptySnaptb0-testExportExpiredSnapshot to hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/emptySnaptb0-testExportExpiredSnapshot 2024-11-22T06:04:17,402 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-22T06:04:17,402 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 136 2024-11-22T06:04:17,403 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=136, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } in 202 msec 2024-11-22T06:04:17,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=136 2024-11-22T06:04:17,519 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-11-22T06:04:17,523 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportExpiredSnapshot', row='0751862fd3db4bcddafcfb38510c593ea', locateType=CURRENT is [region=testtb-testExportExpiredSnapshot,,1732255456535.bade74021296d5cb036873466d5300bd., hostname=407411ea95db,44619,1732255321573, seqNum=2] 2024-11-22T06:04:17,524 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportExpiredSnapshot', row='12660664c98d94e887cdd0c58b025cc27', locateType=CURRENT is [region=testtb-testExportExpiredSnapshot,1,1732255456535.741a7efeeb2abbf2cefdb07f109c09a8., hostname=407411ea95db,35283,1732255321784, seqNum=2] 2024-11-22T06:04:17,525 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportExpiredSnapshot', row='3d646f2250ab96f776a73800cf558ab0e', locateType=CURRENT is [region=testtb-testExportExpiredSnapshot,1,1732255456535.741a7efeeb2abbf2cefdb07f109c09a8., hostname=407411ea95db,35283,1732255321784, seqNum=2] 2024-11-22T06:04:17,525 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportExpiredSnapshot', row='2f828626df9351e24ee04bda59b75b9d2', locateType=CURRENT is [region=testtb-testExportExpiredSnapshot,1,1732255456535.741a7efeeb2abbf2cefdb07f109c09a8., 
hostname=407411ea95db,35283,1732255321784, seqNum=2] 2024-11-22T06:04:17,526 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportExpiredSnapshot', row='42aad891f314b9547b866df69772ddd23', locateType=CURRENT is [region=testtb-testExportExpiredSnapshot,1,1732255456535.741a7efeeb2abbf2cefdb07f109c09a8., hostname=407411ea95db,35283,1732255321784, seqNum=2] 2024-11-22T06:04:17,528 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44619 {}] regionserver.HRegion(8528): writing data to region testtb-testExportExpiredSnapshot,,1732255456535.bade74021296d5cb036873466d5300bd. with WAL disabled. Data may be lost in the event of a crash. 2024-11-22T06:04:17,530 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35283 {}] regionserver.HRegion(8528): writing data to region testtb-testExportExpiredSnapshot,1,1732255456535.741a7efeeb2abbf2cefdb07f109c09a8. with WAL disabled. Data may be lost in the event of a crash. 2024-11-22T06:04:17,532 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-22T06:04:17,534 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportExpiredSnapshot 2024-11-22T06:04:17,534 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportExpiredSnapshot,,1732255456535.bade74021296d5cb036873466d5300bd. 2024-11-22T06:04:17,534 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-22T06:04:17,535 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-22T06:04:17,539 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-22T06:04:17,544 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-22T06:04:17,546 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-11-22T06:04:17,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732255457546 (current time:1732255457546). 
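The "writing data to region ... with WAL disabled. Data may be lost in the event of a crash." entries a few lines up are what HRegion prints when a client submits mutations with write-ahead logging turned off. A minimal sketch of a Put that would be logged this way; the row, family, qualifier and value are placeholders, only the table name comes from the log:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SkipWalPutSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("testtb-testExportExpiredSnapshot"))) {
          Put put = new Put(Bytes.toBytes("row-0"))
              .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
          // Skipping the WAL trades durability for write throughput; a crash before the next
          // flush loses the edit, which is exactly what the log message warns about.
          put.setDurability(Durability.SKIP_WAL);
          table.put(put);
        }
      }
    }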
2024-11-22T06:04:17,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-22T06:04:17,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-11-22T06:04:17,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-22T06:04:17,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5cac3eae, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:04:17,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.ClusterIdFetcher(90): Going to request 407411ea95db,40429,-1 for getting cluster id 2024-11-22T06:04:17,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T06:04:17,548 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '6eeb2921-6f20-4de7-bcc0-81ebecd4007b' 2024-11-22T06:04:17,548 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T06:04:17,548 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "6eeb2921-6f20-4de7-bcc0-81ebecd4007b" 2024-11-22T06:04:17,548 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@24c9e41c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:04:17,548 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [407411ea95db,40429,-1] 2024-11-22T06:04:17,548 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T06:04:17,548 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:04:17,549 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38736, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T06:04:17,549 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6ae82f50, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:04:17,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T06:04:17,550 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] 
client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=407411ea95db,41799,1732255321718, seqNum=-1] 2024-11-22T06:04:17,550 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T06:04:17,551 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38004, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T06:04:17,553 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429. 2024-11-22T06:04:17,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-22T06:04:17,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:04:17,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:04:17,553 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-22T06:04:17,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@518e9a4f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:04:17,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.ClusterIdFetcher(90): Going to request 407411ea95db,40429,-1 for getting cluster id 2024-11-22T06:04:17,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T06:04:17,554 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '6eeb2921-6f20-4de7-bcc0-81ebecd4007b' 2024-11-22T06:04:17,555 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T06:04:17,555 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "6eeb2921-6f20-4de7-bcc0-81ebecd4007b" 2024-11-22T06:04:17,555 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@601db364, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:04:17,555 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [407411ea95db,40429,-1] 2024-11-22T06:04:17,555 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T06:04:17,555 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:04:17,556 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38756, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T06:04:17,556 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2abf67c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:04:17,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T06:04:17,557 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=407411ea95db,41799,1732255321718, seqNum=-1] 2024-11-22T06:04:17,558 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T06:04:17,559 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38020, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
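Each short-lived connection in this section dumps its RPC client settings (Codec=KeyValueCodec, compressor=null, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true); those values match the shipped defaults. The configuration keys below are my best reading of the knobs that control them, so treat the exact key names as an assumption and verify them against your HBase version:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class RpcClientTuningSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed key names; the values mirror the connectTO/readTO/writeTO and
        // minIdleTimeBeforeClose figures printed by AbstractRpcClient above.
        conf.setInt("hbase.ipc.client.socket.timeout.connect", 10000);
        conf.setInt("hbase.ipc.client.socket.timeout.read", 20000);
        conf.setInt("hbase.ipc.client.socket.timeout.write", 60000);
        conf.setInt("hbase.ipc.client.connection.minIdleTimeBeforeClose", 120000);
        System.out.println("connect timeout = "
            + conf.getInt("hbase.ipc.client.socket.timeout.connect", -1) + " ms");
      }
    }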
2024-11-22T06:04:17,560 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1732255324607.c2a1af531af5dfa747044be7409c1d27., hostname=407411ea95db,41799,1732255321718, seqNum=2] 2024-11-22T06:04:17,561 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429. 2024-11-22T06:04:17,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor220.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-22T06:04:17,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:04:17,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:04:17,562 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-22T06:04:17,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-11-22T06:04:17,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-22T06:04:17,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] procedure2.ProcedureExecutor(1139): Stored pid=139, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-11-22T06:04:17,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 139 2024-11-22T06:04:17,564 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-22T06:04:17,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=139 2024-11-22T06:04:17,565 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-22T06:04:17,567 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-22T06:04:17,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742151_1327 (size=165) 2024-11-22T06:04:17,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742151_1327 (size=165) 2024-11-22T06:04:17,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742151_1327 (size=165) 2024-11-22T06:04:17,574 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-22T06:04:17,574 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=140, ppid=139, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure bade74021296d5cb036873466d5300bd}, {pid=141, ppid=139, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 741a7efeeb2abbf2cefdb07f109c09a8}] 2024-11-22T06:04:17,575 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for 
pid=140, ppid=139, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure bade74021296d5cb036873466d5300bd 2024-11-22T06:04:17,575 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=141, ppid=139, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 741a7efeeb2abbf2cefdb07f109c09a8 2024-11-22T06:04:17,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=139 2024-11-22T06:04:17,726 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44619 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=140 2024-11-22T06:04:17,726 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,,1732255456535.bade74021296d5cb036873466d5300bd. 2024-11-22T06:04:17,726 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.HRegion(2902): Flushing bade74021296d5cb036873466d5300bd 1/1 column families, dataSize=132 B heapSize=544 B 2024-11-22T06:04:17,727 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35283 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=141 2024-11-22T06:04:17,727 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,1,1732255456535.741a7efeeb2abbf2cefdb07f109c09a8. 2024-11-22T06:04:17,727 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegion(2902): Flushing 741a7efeeb2abbf2cefdb07f109c09a8 1/1 column families, dataSize=3.13 KB heapSize=7 KB 2024-11-22T06:04:17,745 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportExpiredSnapshot/bade74021296d5cb036873466d5300bd/.tmp/cf/2ac6343c876e4b0d8815ecab92f7de06 is 71, key is 0e37a71e8c7580007491edfd91e57d8b/cf:q/1732255457528/Put/seqid=0 2024-11-22T06:04:17,745 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportExpiredSnapshot/741a7efeeb2abbf2cefdb07f109c09a8/.tmp/cf/389ee1bde5024f7eb2f7faebd1960d53 is 71, key is 1337f9e2121201948f085e8f4244c1d7/cf:q/1732255457530/Put/seqid=0 2024-11-22T06:04:17,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742152_1328 (size=8394) 2024-11-22T06:04:17,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742152_1328 (size=8394) 2024-11-22T06:04:17,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742153_1329 (size=5216) 2024-11-22T06:04:17,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742153_1329 (size=5216) 2024-11-22T06:04:17,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742152_1328 (size=8394) 2024-11-22T06:04:17,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742153_1329 (size=5216) 2024-11-22T06:04:17,751 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.13 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportExpiredSnapshot/741a7efeeb2abbf2cefdb07f109c09a8/.tmp/cf/389ee1bde5024f7eb2f7faebd1960d53 2024-11-22T06:04:17,751 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=132 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportExpiredSnapshot/bade74021296d5cb036873466d5300bd/.tmp/cf/2ac6343c876e4b0d8815ecab92f7de06 2024-11-22T06:04:17,757 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportExpiredSnapshot/bade74021296d5cb036873466d5300bd/.tmp/cf/2ac6343c876e4b0d8815ecab92f7de06 as hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportExpiredSnapshot/bade74021296d5cb036873466d5300bd/cf/2ac6343c876e4b0d8815ecab92f7de06 2024-11-22T06:04:17,757 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportExpiredSnapshot/741a7efeeb2abbf2cefdb07f109c09a8/.tmp/cf/389ee1bde5024f7eb2f7faebd1960d53 as hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportExpiredSnapshot/741a7efeeb2abbf2cefdb07f109c09a8/cf/389ee1bde5024f7eb2f7faebd1960d53 2024-11-22T06:04:17,762 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportExpiredSnapshot/bade74021296d5cb036873466d5300bd/cf/2ac6343c876e4b0d8815ecab92f7de06, entries=2, sequenceid=6, filesize=5.1 K 2024-11-22T06:04:17,762 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportExpiredSnapshot/741a7efeeb2abbf2cefdb07f109c09a8/cf/389ee1bde5024f7eb2f7faebd1960d53, entries=48, sequenceid=6, filesize=8.2 K 2024-11-22T06:04:17,762 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.HRegion(3140): Finished flush of dataSize ~132 B/132, heapSize ~528 B/528, currentSize=0 B/0 for 
bade74021296d5cb036873466d5300bd in 36ms, sequenceid=6, compaction requested=false 2024-11-22T06:04:17,763 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegion(3140): Finished flush of dataSize ~3.13 KB/3204, heapSize ~6.98 KB/7152, currentSize=0 B/0 for 741a7efeeb2abbf2cefdb07f109c09a8 in 35ms, sequenceid=6, compaction requested=false 2024-11-22T06:04:17,763 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportExpiredSnapshot' 2024-11-22T06:04:17,763 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportExpiredSnapshot' 2024-11-22T06:04:17,763 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.HRegion(2603): Flush status journal for bade74021296d5cb036873466d5300bd: 2024-11-22T06:04:17,763 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegion(2603): Flush status journal for 741a7efeeb2abbf2cefdb07f109c09a8: 2024-11-22T06:04:17,763 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,,1732255456535.bade74021296d5cb036873466d5300bd. for snaptb0-testExportExpiredSnapshot completed. 2024-11-22T06:04:17,763 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,1,1732255456535.741a7efeeb2abbf2cefdb07f109c09a8. for snaptb0-testExportExpiredSnapshot completed. 2024-11-22T06:04:17,763 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,,1732255456535.bade74021296d5cb036873466d5300bd.' region-info for snapshot=snaptb0-testExportExpiredSnapshot 2024-11-22T06:04:17,763 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,1,1732255456535.741a7efeeb2abbf2cefdb07f109c09a8.' 
region-info for snapshot=snaptb0-testExportExpiredSnapshot 2024-11-22T06:04:17,763 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-22T06:04:17,763 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-22T06:04:17,763 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportExpiredSnapshot/bade74021296d5cb036873466d5300bd/cf/2ac6343c876e4b0d8815ecab92f7de06] hfiles 2024-11-22T06:04:17,763 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportExpiredSnapshot/741a7efeeb2abbf2cefdb07f109c09a8/cf/389ee1bde5024f7eb2f7faebd1960d53] hfiles 2024-11-22T06:04:17,763 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportExpiredSnapshot/bade74021296d5cb036873466d5300bd/cf/2ac6343c876e4b0d8815ecab92f7de06 for snapshot=snaptb0-testExportExpiredSnapshot 2024-11-22T06:04:17,763 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportExpiredSnapshot/741a7efeeb2abbf2cefdb07f109c09a8/cf/389ee1bde5024f7eb2f7faebd1960d53 for snapshot=snaptb0-testExportExpiredSnapshot 2024-11-22T06:04:17,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742155_1331 (size=110) 2024-11-22T06:04:17,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742154_1330 (size=110) 2024-11-22T06:04:17,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742155_1331 (size=110) 2024-11-22T06:04:17,771 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,1,1732255456535.741a7efeeb2abbf2cefdb07f109c09a8. 2024-11-22T06:04:17,771 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,,1732255456535.bade74021296d5cb036873466d5300bd. 
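Unlike the empty snapshot earlier, pid=140/141 first flush the memstores (the 5.1 K and 8.2 K hfiles above) and then record those files as references in the snapshot manifest; the flush is driven by the FLUSH snapshot type itself. A hedged sketch of the roughly equivalent explicit sequence, flushing through the Admin API and then snapshotting without forcing a second flush; the "-manual" snapshot name is made up:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class FlushThenSnapshotSketch {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("testtb-testExportExpiredSnapshot");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.flush(table);                        // force memstore -> hfile, as the procedure does
          admin.snapshot("snaptb0-testExportExpiredSnapshot-manual", table,
              SnapshotType.SKIPFLUSH);               // reference the already-flushed hfiles
        }
      }
    }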
2024-11-22T06:04:17,772 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=140 2024-11-22T06:04:17,772 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=141 2024-11-22T06:04:17,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742154_1330 (size=110) 2024-11-22T06:04:17,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742154_1330 (size=110) 2024-11-22T06:04:17,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742155_1331 (size=110) 2024-11-22T06:04:17,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.HMaster(4169): Remote procedure done, pid=140 2024-11-22T06:04:17,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.HMaster(4169): Remote procedure done, pid=141 2024-11-22T06:04:17,772 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportExpiredSnapshot on region bade74021296d5cb036873466d5300bd 2024-11-22T06:04:17,772 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportExpiredSnapshot on region 741a7efeeb2abbf2cefdb07f109c09a8 2024-11-22T06:04:17,772 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=140, ppid=139, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure bade74021296d5cb036873466d5300bd 2024-11-22T06:04:17,772 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=141, ppid=139, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 741a7efeeb2abbf2cefdb07f109c09a8 2024-11-22T06:04:17,774 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=140, ppid=139, state=SUCCESS, hasLock=false; SnapshotRegionProcedure bade74021296d5cb036873466d5300bd in 199 msec 2024-11-22T06:04:17,775 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=141, resume processing ppid=139 2024-11-22T06:04:17,775 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-22T06:04:17,775 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=141, ppid=139, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 741a7efeeb2abbf2cefdb07f109c09a8 in 199 msec 2024-11-22T06:04:17,775 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-22T06:04:17,776 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; 
org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-22T06:04:17,776 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportExpiredSnapshot 2024-11-22T06:04:17,777 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/.tmp/snaptb0-testExportExpiredSnapshot 2024-11-22T06:04:17,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742156_1332 (size=630) 2024-11-22T06:04:17,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742156_1332 (size=630) 2024-11-22T06:04:17,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742156_1332 (size=630) 2024-11-22T06:04:17,786 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-22T06:04:17,790 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-22T06:04:17,790 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/.tmp/snaptb0-testExportExpiredSnapshot to hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/snaptb0-testExportExpiredSnapshot 2024-11-22T06:04:17,791 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-22T06:04:17,791 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 139 2024-11-22T06:04:17,793 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=139, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } in 229 msec 2024-11-22T06:04:17,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=139 2024-11-22T06:04:17,881 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportExpiredSnapshot completed 
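With snaptb0-testExportExpiredSnapshot completed (pid=139 finished in 229 msec), the test can move on to the export step its name refers to. For orientation, a sketch of driving the ExportSnapshot tool programmatically; the destination URI is a placeholder and the option spelling follows the HBase reference guide, so treat the exact invocation as an assumption and check it against your release:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        // Copies the snapshot manifest plus the referenced hfiles to another filesystem.
        int rc = ToolRunner.run(HBaseConfiguration.create(), new ExportSnapshot(),
            new String[] {
                "--snapshot", "snaptb0-testExportExpiredSnapshot",
                "--copy-to", "hdfs://backup-namenode:8020/hbase-backup"   // placeholder URI
            });
        System.exit(rc);
      }
    }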
2024-11-22T06:04:17,884 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T06:04:17,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] procedure2.ProcedureExecutor(1139): Stored pid=142, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testExportExpiredSnapshot 2024-11-22T06:04:17,886 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=142, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_PRE_OPERATION 2024-11-22T06:04:17,887 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T06:04:17,887 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testExportExpiredSnapshot" procId is: 142 2024-11-22T06:04:17,888 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=142, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-22T06:04:17,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=142 2024-11-22T06:04:17,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742157_1333 (size=400) 2024-11-22T06:04:17,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742157_1333 (size=400) 2024-11-22T06:04:17,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742157_1333 (size=400) 2024-11-22T06:04:17,899 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => bcf300d151c9df6bfc8b093b51457a6c, NAME => 'testExportExpiredSnapshot,,1732255457883.bcf300d151c9df6bfc8b093b51457a6c.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788 2024-11-22T06:04:17,900 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => c1407c3a5417180f7dcb0e325387e8f9, NAME => 'testExportExpiredSnapshot,1,1732255457883.c1407c3a5417180f7dcb0e325387e8f9.', STARTKEY 
=> '1', ENDKEY => ''}, tableDescriptor='testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788 2024-11-22T06:04:17,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742159_1335 (size=61) 2024-11-22T06:04:17,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742159_1335 (size=61) 2024-11-22T06:04:17,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742158_1334 (size=61) 2024-11-22T06:04:17,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742159_1335 (size=61) 2024-11-22T06:04:17,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742158_1334 (size=61) 2024-11-22T06:04:17,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742158_1334 (size=61) 2024-11-22T06:04:17,910 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,1,1732255457883.c1407c3a5417180f7dcb0e325387e8f9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T06:04:17,910 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,,1732255457883.bcf300d151c9df6bfc8b093b51457a6c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T06:04:17,910 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1722): Closing c1407c3a5417180f7dcb0e325387e8f9, disabling compactions & flushes 2024-11-22T06:04:17,910 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1722): Closing bcf300d151c9df6bfc8b093b51457a6c, disabling compactions & flushes 2024-11-22T06:04:17,910 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,1,1732255457883.c1407c3a5417180f7dcb0e325387e8f9. 2024-11-22T06:04:17,910 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,1,1732255457883.c1407c3a5417180f7dcb0e325387e8f9. 2024-11-22T06:04:17,910 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,,1732255457883.bcf300d151c9df6bfc8b093b51457a6c. 2024-11-22T06:04:17,910 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,1,1732255457883.c1407c3a5417180f7dcb0e325387e8f9. 
after waiting 0 ms 2024-11-22T06:04:17,910 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,1,1732255457883.c1407c3a5417180f7dcb0e325387e8f9. 2024-11-22T06:04:17,910 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,,1732255457883.bcf300d151c9df6bfc8b093b51457a6c. 2024-11-22T06:04:17,910 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,1,1732255457883.c1407c3a5417180f7dcb0e325387e8f9. 2024-11-22T06:04:17,910 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,,1732255457883.bcf300d151c9df6bfc8b093b51457a6c. after waiting 0 ms 2024-11-22T06:04:17,910 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1676): Region close journal for c1407c3a5417180f7dcb0e325387e8f9: Waiting for close lock at 1732255457910Disabling compacts and flushes for region at 1732255457910Disabling writes for close at 1732255457910Writing region close event to WAL at 1732255457910Closed at 1732255457910 2024-11-22T06:04:17,910 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,,1732255457883.bcf300d151c9df6bfc8b093b51457a6c. 2024-11-22T06:04:17,911 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,,1732255457883.bcf300d151c9df6bfc8b093b51457a6c. 2024-11-22T06:04:17,911 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1676): Region close journal for bcf300d151c9df6bfc8b093b51457a6c: Waiting for close lock at 1732255457910Disabling compacts and flushes for region at 1732255457910Disabling writes for close at 1732255457910Writing region close event to WAL at 1732255457910Closed at 1732255457910 2024-11-22T06:04:17,912 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=142, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_ADD_TO_META 2024-11-22T06:04:17,912 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportExpiredSnapshot,1,1732255457883.c1407c3a5417180f7dcb0e325387e8f9.","families":{"info":[{"qualifier":"regioninfo","vlen":60,"tag":[],"timestamp":"1732255457912"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732255457912"}]},"ts":"1732255457912"} 2024-11-22T06:04:17,912 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportExpiredSnapshot,,1732255457883.bcf300d151c9df6bfc8b093b51457a6c.","families":{"info":[{"qualifier":"regioninfo","vlen":60,"tag":[],"timestamp":"1732255457912"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732255457912"}]},"ts":"1732255457912"} 2024-11-22T06:04:17,914 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
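[editor's note] The CreateTableProcedure above builds 'testExportExpiredSnapshot' with a single 'cf' family (VERSIONS => '1', REGION_REPLICATION => '1') and two regions split at row key '1', then records both regions in hbase:meta. A minimal client-side equivalent, sketched with the HBase 2.x descriptor builders; the names and the single split key mirror the log, but this is illustrative rather than the test's own code.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateExportTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      TableDescriptor desc = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testExportExpiredSnapshot"))
          .setRegionReplication(1)
          .setColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)   // VERSIONS => '1' in the logged descriptor
              .build())
          .build();
      // One split key ('1') yields the two regions seen in the log:
      // [,1) -> bcf300d1...  and  [1,) -> c1407c3a...
      byte[][] splitKeys = new byte[][] { Bytes.toBytes("1") };
      admin.createTable(desc, splitKeys);
    }
  }
}
```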
2024-11-22T06:04:17,915 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=142, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-22T06:04:17,915 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732255457915"}]},"ts":"1732255457915"} 2024-11-22T06:04:17,917 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportExpiredSnapshot, state=ENABLING in hbase:meta 2024-11-22T06:04:17,917 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {407411ea95db=0} racks are {/default-rack=0} 2024-11-22T06:04:17,918 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-22T06:04:17,918 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-22T06:04:17,918 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-22T06:04:17,918 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-22T06:04:17,918 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-22T06:04:17,918 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-22T06:04:17,918 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-22T06:04:17,918 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-22T06:04:17,918 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-22T06:04:17,918 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-22T06:04:17,919 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=143, ppid=142, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=bcf300d151c9df6bfc8b093b51457a6c, ASSIGN}, {pid=144, ppid=142, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=c1407c3a5417180f7dcb0e325387e8f9, ASSIGN}] 2024-11-22T06:04:17,920 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=144, ppid=142, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=c1407c3a5417180f7dcb0e325387e8f9, ASSIGN 2024-11-22T06:04:17,920 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=143, ppid=142, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=bcf300d151c9df6bfc8b093b51457a6c, ASSIGN 2024-11-22T06:04:17,920 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=143, ppid=142, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=bcf300d151c9df6bfc8b093b51457a6c, ASSIGN; state=OFFLINE, location=407411ea95db,44619,1732255321573; forceNewPlan=false, retain=false 2024-11-22T06:04:17,920 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=144, ppid=142, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, 
hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=c1407c3a5417180f7dcb0e325387e8f9, ASSIGN; state=OFFLINE, location=407411ea95db,41799,1732255321718; forceNewPlan=false, retain=false 2024-11-22T06:04:17,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=142 2024-11-22T06:04:18,071 INFO [407411ea95db:40429 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-11-22T06:04:18,071 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=144 updating hbase:meta row=c1407c3a5417180f7dcb0e325387e8f9, regionState=OPENING, regionLocation=407411ea95db,41799,1732255321718 2024-11-22T06:04:18,071 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=143 updating hbase:meta row=bcf300d151c9df6bfc8b093b51457a6c, regionState=OPENING, regionLocation=407411ea95db,44619,1732255321573 2024-11-22T06:04:18,073 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=144, ppid=142, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=c1407c3a5417180f7dcb0e325387e8f9, ASSIGN because future has completed 2024-11-22T06:04:18,074 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=145, ppid=144, state=RUNNABLE, hasLock=false; OpenRegionProcedure c1407c3a5417180f7dcb0e325387e8f9, server=407411ea95db,41799,1732255321718}] 2024-11-22T06:04:18,074 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=143, ppid=142, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=bcf300d151c9df6bfc8b093b51457a6c, ASSIGN because future has completed 2024-11-22T06:04:18,075 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=146, ppid=143, state=RUNNABLE, hasLock=false; OpenRegionProcedure bcf300d151c9df6bfc8b093b51457a6c, server=407411ea95db,44619,1732255321573}] 2024-11-22T06:04:18,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=142 2024-11-22T06:04:18,229 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] handler.AssignRegionHandler(132): Open testExportExpiredSnapshot,1,1732255457883.c1407c3a5417180f7dcb0e325387e8f9. 2024-11-22T06:04:18,229 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(7752): Opening region: {ENCODED => c1407c3a5417180f7dcb0e325387e8f9, NAME => 'testExportExpiredSnapshot,1,1732255457883.c1407c3a5417180f7dcb0e325387e8f9.', STARTKEY => '1', ENDKEY => ''} 2024-11-22T06:04:18,229 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportExpiredSnapshot,1,1732255457883.c1407c3a5417180f7dcb0e325387e8f9. service=AccessControlService 2024-11-22T06:04:18,229 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] handler.AssignRegionHandler(132): Open testExportExpiredSnapshot,,1732255457883.bcf300d151c9df6bfc8b093b51457a6c. 
2024-11-22T06:04:18,229 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(7752): Opening region: {ENCODED => bcf300d151c9df6bfc8b093b51457a6c, NAME => 'testExportExpiredSnapshot,,1732255457883.bcf300d151c9df6bfc8b093b51457a6c.', STARTKEY => '', ENDKEY => '1'} 2024-11-22T06:04:18,229 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-22T06:04:18,229 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportExpiredSnapshot c1407c3a5417180f7dcb0e325387e8f9 2024-11-22T06:04:18,229 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportExpiredSnapshot,,1732255457883.bcf300d151c9df6bfc8b093b51457a6c. service=AccessControlService 2024-11-22T06:04:18,229 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,1,1732255457883.c1407c3a5417180f7dcb0e325387e8f9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T06:04:18,229 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(7794): checking encryption for c1407c3a5417180f7dcb0e325387e8f9 2024-11-22T06:04:18,229 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
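[editor's note] Both regions register the AccessControlService coprocessor endpoint because AccessController is loaded as a system coprocessor, as the "System coprocessor ... AccessController loaded" entries show. On a standalone cluster that behaviour comes from security settings along the lines of the sketch below, written here as programmatic Configuration overrides of the usual hbase-site.xml keys; the test harness wires this up in its own way, so treat this purely as illustration.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class AccessControllerConfig {
  public static Configuration secured() {
    Configuration conf = HBaseConfiguration.create();
    // Enable authorization checks and load AccessController on the master,
    // the regionservers and every region, which is what produces the
    // "service=AccessControlService" registrations seen in the log.
    conf.setBoolean("hbase.security.authorization", true);
    conf.set("hbase.coprocessor.master.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");
    conf.set("hbase.coprocessor.region.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");
    conf.set("hbase.coprocessor.regionserver.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");
    return conf;
  }
}
```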
2024-11-22T06:04:18,229 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(7797): checking classloading for c1407c3a5417180f7dcb0e325387e8f9 2024-11-22T06:04:18,229 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportExpiredSnapshot bcf300d151c9df6bfc8b093b51457a6c 2024-11-22T06:04:18,230 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,,1732255457883.bcf300d151c9df6bfc8b093b51457a6c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T06:04:18,230 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(7794): checking encryption for bcf300d151c9df6bfc8b093b51457a6c 2024-11-22T06:04:18,230 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(7797): checking classloading for bcf300d151c9df6bfc8b093b51457a6c 2024-11-22T06:04:18,231 INFO [StoreOpener-bcf300d151c9df6bfc8b093b51457a6c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region bcf300d151c9df6bfc8b093b51457a6c 2024-11-22T06:04:18,231 INFO [StoreOpener-c1407c3a5417180f7dcb0e325387e8f9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region c1407c3a5417180f7dcb0e325387e8f9 2024-11-22T06:04:18,232 INFO [StoreOpener-c1407c3a5417180f7dcb0e325387e8f9-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c1407c3a5417180f7dcb0e325387e8f9 columnFamilyName cf 2024-11-22T06:04:18,232 INFO [StoreOpener-bcf300d151c9df6bfc8b093b51457a6c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region bcf300d151c9df6bfc8b093b51457a6c columnFamilyName cf 
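[editor's note] The CompactionConfiguration line for each store prints the effective tuning values: minCompactSize 128 MB, 3 to 10 files per compaction, ratio 1.2, off-peak ratio 5.0, weekly major compactions with 0.5 jitter. The sketch below maps each logged figure to its corresponding tuning key, shown as programmatic overrides only to make the mapping explicit; it is an annotation, not something the test sets.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuning {
  public static Configuration tuned() {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize:128 MB
    conf.setInt("hbase.hstore.compaction.min", 3);                        // minFilesToCompact:3
    conf.setInt("hbase.hstore.compaction.max", 10);                       // maxFilesToCompact:10
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);                 // ratio 1.200000
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);         // off-peak ratio 5.000000
    conf.setLong("hbase.hregion.majorcompaction", 604_800_000L);          // major period (7 days)
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);          // major jitter 0.500000
    return conf;
  }
}
```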
2024-11-22T06:04:18,232 DEBUG [StoreOpener-c1407c3a5417180f7dcb0e325387e8f9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T06:04:18,232 DEBUG [StoreOpener-bcf300d151c9df6bfc8b093b51457a6c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T06:04:18,233 INFO [StoreOpener-c1407c3a5417180f7dcb0e325387e8f9-1 {}] regionserver.HStore(327): Store=c1407c3a5417180f7dcb0e325387e8f9/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T06:04:18,233 INFO [StoreOpener-bcf300d151c9df6bfc8b093b51457a6c-1 {}] regionserver.HStore(327): Store=bcf300d151c9df6bfc8b093b51457a6c/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T06:04:18,233 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(1038): replaying wal for c1407c3a5417180f7dcb0e325387e8f9 2024-11-22T06:04:18,233 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1038): replaying wal for bcf300d151c9df6bfc8b093b51457a6c 2024-11-22T06:04:18,234 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testExportExpiredSnapshot/bcf300d151c9df6bfc8b093b51457a6c 2024-11-22T06:04:18,234 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testExportExpiredSnapshot/c1407c3a5417180f7dcb0e325387e8f9 2024-11-22T06:04:18,234 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testExportExpiredSnapshot/bcf300d151c9df6bfc8b093b51457a6c 2024-11-22T06:04:18,234 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testExportExpiredSnapshot/c1407c3a5417180f7dcb0e325387e8f9 2024-11-22T06:04:18,234 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1048): stopping wal replay for bcf300d151c9df6bfc8b093b51457a6c 2024-11-22T06:04:18,234 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(1048): stopping wal replay for c1407c3a5417180f7dcb0e325387e8f9 2024-11-22T06:04:18,234 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1060): Cleaning up temporary data for bcf300d151c9df6bfc8b093b51457a6c 2024-11-22T06:04:18,234 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] 
regionserver.HRegion(1060): Cleaning up temporary data for c1407c3a5417180f7dcb0e325387e8f9 2024-11-22T06:04:18,236 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(1093): writing seq id for c1407c3a5417180f7dcb0e325387e8f9 2024-11-22T06:04:18,236 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1093): writing seq id for bcf300d151c9df6bfc8b093b51457a6c 2024-11-22T06:04:18,237 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testExportExpiredSnapshot/bcf300d151c9df6bfc8b093b51457a6c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T06:04:18,237 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testExportExpiredSnapshot/c1407c3a5417180f7dcb0e325387e8f9/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T06:04:18,238 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1114): Opened bcf300d151c9df6bfc8b093b51457a6c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74597385, jitterRate=0.11158765852451324}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-22T06:04:18,238 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(1114): Opened c1407c3a5417180f7dcb0e325387e8f9; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60678692, jitterRate=-0.09581702947616577}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-22T06:04:18,238 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1122): Running coprocessor post-open hooks for bcf300d151c9df6bfc8b093b51457a6c 2024-11-22T06:04:18,238 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(1122): Running coprocessor post-open hooks for c1407c3a5417180f7dcb0e325387e8f9 2024-11-22T06:04:18,239 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1006): Region open journal for bcf300d151c9df6bfc8b093b51457a6c: Running coprocessor pre-open hook at 1732255458230Writing region info on filesystem at 1732255458230Initializing all the Stores at 1732255458230Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732255458230Cleaning up temporary data from old regions at 1732255458234 (+4 ms)Running coprocessor post-open hooks at 1732255458238 (+4 ms)Region opened successfully at 1732255458239 (+1 ms) 2024-11-22T06:04:18,239 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(1006): Region open journal 
for c1407c3a5417180f7dcb0e325387e8f9: Running coprocessor pre-open hook at 1732255458229Writing region info on filesystem at 1732255458230 (+1 ms)Initializing all the Stores at 1732255458230Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732255458230Cleaning up temporary data from old regions at 1732255458234 (+4 ms)Running coprocessor post-open hooks at 1732255458238 (+4 ms)Region opened successfully at 1732255458239 (+1 ms) 2024-11-22T06:04:18,240 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportExpiredSnapshot,,1732255457883.bcf300d151c9df6bfc8b093b51457a6c., pid=146, masterSystemTime=1732255458227 2024-11-22T06:04:18,240 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportExpiredSnapshot,1,1732255457883.c1407c3a5417180f7dcb0e325387e8f9., pid=145, masterSystemTime=1732255458225 2024-11-22T06:04:18,241 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportExpiredSnapshot,1,1732255457883.c1407c3a5417180f7dcb0e325387e8f9. 2024-11-22T06:04:18,241 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] handler.AssignRegionHandler(153): Opened testExportExpiredSnapshot,1,1732255457883.c1407c3a5417180f7dcb0e325387e8f9. 2024-11-22T06:04:18,241 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=144 updating hbase:meta row=c1407c3a5417180f7dcb0e325387e8f9, regionState=OPEN, openSeqNum=2, regionLocation=407411ea95db,41799,1732255321718 2024-11-22T06:04:18,241 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportExpiredSnapshot,,1732255457883.bcf300d151c9df6bfc8b093b51457a6c. 2024-11-22T06:04:18,242 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] handler.AssignRegionHandler(153): Opened testExportExpiredSnapshot,,1732255457883.bcf300d151c9df6bfc8b093b51457a6c. 
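[editor's note] At this point both regions report "Opened ..." and their post-open deploy tasks finish, after which the master records them as OPEN in hbase:meta. A client can confirm the resulting assignment through the RegionLocator API, roughly as sketched below (illustrative only, not part of the test).

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class ShowAssignments {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         RegionLocator locator =
             connection.getRegionLocator(TableName.valueOf("testExportExpiredSnapshot"))) {
      // Prints one line per region with its encoded name and hosting server,
      // mirroring the regionState=OPEN, regionLocation=... updates above.
      for (HRegionLocation location : locator.getAllRegionLocations()) {
        System.out.println(location.getRegion().getEncodedName()
            + " -> " + location.getServerName());
      }
    }
  }
}
```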
2024-11-22T06:04:18,242 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=143 updating hbase:meta row=bcf300d151c9df6bfc8b093b51457a6c, regionState=OPEN, openSeqNum=2, regionLocation=407411ea95db,44619,1732255321573 2024-11-22T06:04:18,243 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=145, ppid=144, state=RUNNABLE, hasLock=false; OpenRegionProcedure c1407c3a5417180f7dcb0e325387e8f9, server=407411ea95db,41799,1732255321718 because future has completed 2024-11-22T06:04:18,244 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=146, ppid=143, state=RUNNABLE, hasLock=false; OpenRegionProcedure bcf300d151c9df6bfc8b093b51457a6c, server=407411ea95db,44619,1732255321573 because future has completed 2024-11-22T06:04:18,245 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=145, resume processing ppid=144 2024-11-22T06:04:18,245 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=145, ppid=144, state=SUCCESS, hasLock=false; OpenRegionProcedure c1407c3a5417180f7dcb0e325387e8f9, server=407411ea95db,41799,1732255321718 in 170 msec 2024-11-22T06:04:18,246 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=146, resume processing ppid=143 2024-11-22T06:04:18,246 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=146, ppid=143, state=SUCCESS, hasLock=false; OpenRegionProcedure bcf300d151c9df6bfc8b093b51457a6c, server=407411ea95db,44619,1732255321573 in 169 msec 2024-11-22T06:04:18,246 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=144, ppid=142, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=c1407c3a5417180f7dcb0e325387e8f9, ASSIGN in 327 msec 2024-11-22T06:04:18,247 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=143, resume processing ppid=142 2024-11-22T06:04:18,247 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=143, ppid=142, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=bcf300d151c9df6bfc8b093b51457a6c, ASSIGN in 328 msec 2024-11-22T06:04:18,248 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=142, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-22T06:04:18,248 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732255458248"}]},"ts":"1732255458248"} 2024-11-22T06:04:18,249 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportExpiredSnapshot, state=ENABLED in hbase:meta 2024-11-22T06:04:18,250 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=142, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_POST_OPERATION 2024-11-22T06:04:18,250 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testExportExpiredSnapshot jenkins: RWXCA 2024-11-22T06:04:18,252 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41799 {}] access.PermissionStorage(613): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-11-22T06:04:18,314 DEBUG 
[pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44619-0x101626c19a90001, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:04:18,314 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41799-0x101626c19a90002, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:04:18,314 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35283-0x101626c19a90003, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:04:18,314 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:04:18,322 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-22T06:04:18,322 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-22T06:04:18,322 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-22T06:04:18,323 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-22T06:04:18,323 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-22T06:04:18,323 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-22T06:04:18,323 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-22T06:04:18,323 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-22T06:04:18,324 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=142, state=SUCCESS, hasLock=false; CreateTableProcedure table=testExportExpiredSnapshot in 438 msec 2024-11-22T06:04:18,519 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=142 2024-11-22T06:04:18,520 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testExportExpiredSnapshot completed 2024-11-22T06:04:18,520 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testExportExpiredSnapshot get assigned. Timeout = 60000ms 2024-11-22T06:04:18,520 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-22T06:04:18,527 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testExportExpiredSnapshot assigned to meta. Checking AM states. 2024-11-22T06:04:18,527 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-22T06:04:18,527 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testExportExpiredSnapshot assigned. 2024-11-22T06:04:18,527 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-22T06:04:18,533 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportExpiredSnapshot', row='044ac71f57086c223282da76b5c424062', locateType=CURRENT is [region=testExportExpiredSnapshot,,1732255457883.bcf300d151c9df6bfc8b093b51457a6c., hostname=407411ea95db,44619,1732255321573, seqNum=2] 2024-11-22T06:04:18,534 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportExpiredSnapshot', row='191fa24cb390fb69c6670d68601b7efb9', locateType=CURRENT is [region=testExportExpiredSnapshot,1,1732255457883.c1407c3a5417180f7dcb0e325387e8f9., hostname=407411ea95db,41799,1732255321718, seqNum=2] 2024-11-22T06:04:18,536 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportExpiredSnapshot', row='28bd335e4eb188bd71158b813aa24443c', locateType=CURRENT is [region=testExportExpiredSnapshot,1,1732255457883.c1407c3a5417180f7dcb0e325387e8f9., hostname=407411ea95db,41799,1732255321718, seqNum=2] 2024-11-22T06:04:18,536 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportExpiredSnapshot', row='4c9b2034f36dcdcc22ac8eb67162dd23d', locateType=CURRENT is [region=testExportExpiredSnapshot,1,1732255457883.c1407c3a5417180f7dcb0e325387e8f9., hostname=407411ea95db,41799,1732255321718, seqNum=2] 2024-11-22T06:04:18,537 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportExpiredSnapshot', row='35981f2a86f22d550908ece82d4d856b7', locateType=CURRENT is [region=testExportExpiredSnapshot,1,1732255457883.c1407c3a5417180f7dcb0e325387e8f9., hostname=407411ea95db,41799,1732255321718, seqNum=2] 2024-11-22T06:04:18,540 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44619 {}] regionserver.HRegion(8528): writing data to region testExportExpiredSnapshot,,1732255457883.bcf300d151c9df6bfc8b093b51457a6c. with WAL disabled. Data may be lost in the event of a crash. 
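[editor's note] The "writing data to region ... with WAL disabled. Data may be lost in the event of a crash." warnings are emitted when the client loads rows with WAL writes skipped. The hedged sketch below shows the client-side durability setting that triggers that message; the row key and value are made up for illustration.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class LoadWithoutWal {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("testExportExpiredSnapshot"))) {
      Put put = new Put(Bytes.toBytes("row-0001"));   // hypothetical row key
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      // SKIP_WAL trades durability for write throughput; the regionserver then
      // logs the "WAL disabled ... Data may be lost" warning seen above.
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);
    }
  }
}
```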
2024-11-22T06:04:18,542 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41799 {}] regionserver.HRegion(8528): writing data to region testExportExpiredSnapshot,1,1732255457883.c1407c3a5417180f7dcb0e325387e8f9. with WAL disabled. Data may be lost in the event of a crash. 2024-11-22T06:04:18,543 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-22T06:04:18,546 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportExpiredSnapshot 2024-11-22T06:04:18,546 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportExpiredSnapshot,,1732255457883.bcf300d151c9df6bfc8b093b51457a6c. 2024-11-22T06:04:18,546 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-22T06:04:18,548 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-22T06:04:18,554 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-22T06:04:18,561 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } 2024-11-22T06:04:18,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snapshot-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-11-22T06:04:18,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-22T06:04:18,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6bd0f106, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:04:18,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.ClusterIdFetcher(90): Going to request 407411ea95db,40429,-1 for getting cluster id 2024-11-22T06:04:18,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T06:04:18,563 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '6eeb2921-6f20-4de7-bcc0-81ebecd4007b' 2024-11-22T06:04:18,563 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T06:04:18,563 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "6eeb2921-6f20-4de7-bcc0-81ebecd4007b" 2024-11-22T06:04:18,563 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@343c114a, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:04:18,563 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [407411ea95db,40429,-1] 2024-11-22T06:04:18,564 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T06:04:18,564 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:04:18,564 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38770, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T06:04:18,565 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@59533741, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:04:18,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T06:04:18,566 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=407411ea95db,41799,1732255321718, seqNum=-1] 2024-11-22T06:04:18,567 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T06:04:18,568 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38036, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T06:04:18,569 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429. 
2024-11-22T06:04:18,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-22T06:04:18,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:04:18,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:04:18,570 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-22T06:04:18,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@70088738, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:04:18,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.ClusterIdFetcher(90): Going to request 407411ea95db,40429,-1 for getting cluster id 2024-11-22T06:04:18,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T06:04:18,571 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '6eeb2921-6f20-4de7-bcc0-81ebecd4007b' 2024-11-22T06:04:18,571 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T06:04:18,571 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "6eeb2921-6f20-4de7-bcc0-81ebecd4007b" 2024-11-22T06:04:18,571 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@22c8db64, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:04:18,572 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [407411ea95db,40429,-1] 2024-11-22T06:04:18,572 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T06:04:18,572 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:04:18,573 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38796, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T06:04:18,573 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@55409620, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:04:18,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T06:04:18,575 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=407411ea95db,41799,1732255321718, seqNum=-1] 2024-11-22T06:04:18,575 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T06:04:18,576 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38050, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T06:04:18,578 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1732255324607.c2a1af531af5dfa747044be7409c1d27., hostname=407411ea95db,41799,1732255321718, seqNum=2] 2024-11-22T06:04:18,579 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429. 
2024-11-22T06:04:18,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor220.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-22T06:04:18,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:04:18,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:04:18,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] access.PermissionStorage(613): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-11-22T06:04:18,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
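[editor's note] The call stack above ends in writeAclToSnapshotDescription: while validating the snapshot request the master reads the table's ACL (here "jenkins: RWXCA") so it can be carried with the snapshot. The same permissions can be listed from a client with AccessControlClient, roughly as below; this assumes the AccessController setup noted earlier and is an illustrative sketch only.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.UserPermission;

public class ShowTableAcl {
  public static void main(String[] args) throws Throwable {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf)) {
      // Lists the permissions stored in hbase:acl for the table, e.g.
      // "jenkins: RWXCA" as read back by PermissionStorage in the log.
      for (UserPermission perm :
          AccessControlClient.getUserPermissions(connection, "testExportExpiredSnapshot")) {
        System.out.println(perm);
      }
    }
  }
}
```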
2024-11-22T06:04:18,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] procedure2.ProcedureExecutor(1139): Stored pid=147, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } 2024-11-22T06:04:18,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }, snapshot procedure id = 147 2024-11-22T06:04:18,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=147 2024-11-22T06:04:18,593 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-22T06:04:18,594 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_PREPARE 2024-11-22T06:04:18,595 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-22T06:04:18,598 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-22T06:04:18,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742160_1336 (size=152) 2024-11-22T06:04:18,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742160_1336 (size=152) 2024-11-22T06:04:18,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742160_1336 (size=152) 2024-11-22T06:04:18,610 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-22T06:04:18,610 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=148, ppid=147, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure bcf300d151c9df6bfc8b093b51457a6c}, {pid=149, ppid=147, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c1407c3a5417180f7dcb0e325387e8f9}] 2024-11-22T06:04:18,611 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=148, ppid=147, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure bcf300d151c9df6bfc8b093b51457a6c 2024-11-22T06:04:18,612 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=149, ppid=147, state=RUNNABLE, 
hasLock=false; SnapshotRegionProcedure c1407c3a5417180f7dcb0e325387e8f9 2024-11-22T06:04:18,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=147 2024-11-22T06:04:18,763 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44619 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=148 2024-11-22T06:04:18,763 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41799 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=149 2024-11-22T06:04:18,764 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportExpiredSnapshot,1,1732255457883.c1407c3a5417180f7dcb0e325387e8f9. 2024-11-22T06:04:18,764 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportExpiredSnapshot,,1732255457883.bcf300d151c9df6bfc8b093b51457a6c. 2024-11-22T06:04:18,764 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.HRegion(2902): Flushing bcf300d151c9df6bfc8b093b51457a6c 1/1 column families, dataSize=333 B heapSize=976 B 2024-11-22T06:04:18,764 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.HRegion(2902): Flushing c1407c3a5417180f7dcb0e325387e8f9 1/1 column families, dataSize=2.93 KB heapSize=6.58 KB 2024-11-22T06:04:18,783 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testExportExpiredSnapshot/c1407c3a5417180f7dcb0e325387e8f9/.tmp/cf/e9b611a7fedf4b41a1a9b5fb8f031338 is 71, key is 128d907685513af0862a9ffe34b41597/cf:q/1732255458542/Put/seqid=0 2024-11-22T06:04:18,804 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testExportExpiredSnapshot/bcf300d151c9df6bfc8b093b51457a6c/.tmp/cf/6c0f2475fab949dfa726a6bb30c5584f is 71, key is 02da01740d9a1df7c99f339b8a21dba1/cf:q/1732255458540/Put/seqid=0 2024-11-22T06:04:18,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742161_1337 (size=8188) 2024-11-22T06:04:18,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742161_1337 (size=8188) 2024-11-22T06:04:18,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742161_1337 (size=8188) 2024-11-22T06:04:18,807 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.93 KB at sequenceid=5 (bloomFilter=true), 
to=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testExportExpiredSnapshot/c1407c3a5417180f7dcb0e325387e8f9/.tmp/cf/e9b611a7fedf4b41a1a9b5fb8f031338 2024-11-22T06:04:18,812 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testExportExpiredSnapshot/c1407c3a5417180f7dcb0e325387e8f9/.tmp/cf/e9b611a7fedf4b41a1a9b5fb8f031338 as hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testExportExpiredSnapshot/c1407c3a5417180f7dcb0e325387e8f9/cf/e9b611a7fedf4b41a1a9b5fb8f031338 2024-11-22T06:04:18,818 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testExportExpiredSnapshot/c1407c3a5417180f7dcb0e325387e8f9/cf/e9b611a7fedf4b41a1a9b5fb8f031338, entries=45, sequenceid=5, filesize=8.0 K 2024-11-22T06:04:18,819 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.HRegion(3140): Finished flush of dataSize ~2.93 KB/3003, heapSize ~6.56 KB/6720, currentSize=0 B/0 for c1407c3a5417180f7dcb0e325387e8f9 in 55ms, sequenceid=5, compaction requested=false 2024-11-22T06:04:18,819 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportExpiredSnapshot' 2024-11-22T06:04:18,819 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.HRegion(2603): Flush status journal for c1407c3a5417180f7dcb0e325387e8f9: 2024-11-22T06:04:18,819 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportExpiredSnapshot,1,1732255457883.c1407c3a5417180f7dcb0e325387e8f9. for snapshot-testExportExpiredSnapshot completed. 2024-11-22T06:04:18,820 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] snapshot.SnapshotManifest(241): Storing 'testExportExpiredSnapshot,1,1732255457883.c1407c3a5417180f7dcb0e325387e8f9.' 
region-info for snapshot=snapshot-testExportExpiredSnapshot 2024-11-22T06:04:18,820 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-22T06:04:18,820 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testExportExpiredSnapshot/c1407c3a5417180f7dcb0e325387e8f9/cf/e9b611a7fedf4b41a1a9b5fb8f031338] hfiles 2024-11-22T06:04:18,820 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testExportExpiredSnapshot/c1407c3a5417180f7dcb0e325387e8f9/cf/e9b611a7fedf4b41a1a9b5fb8f031338 for snapshot=snapshot-testExportExpiredSnapshot 2024-11-22T06:04:18,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742162_1338 (size=5422) 2024-11-22T06:04:18,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742162_1338 (size=5422) 2024-11-22T06:04:18,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742162_1338 (size=5422) 2024-11-22T06:04:18,821 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=333 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testExportExpiredSnapshot/bcf300d151c9df6bfc8b093b51457a6c/.tmp/cf/6c0f2475fab949dfa726a6bb30c5584f 2024-11-22T06:04:18,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742163_1339 (size=103) 2024-11-22T06:04:18,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742163_1339 (size=103) 2024-11-22T06:04:18,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742163_1339 (size=103) 2024-11-22T06:04:18,826 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportExpiredSnapshot,1,1732255457883.c1407c3a5417180f7dcb0e325387e8f9. 
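As the entries above show, a FLUSH snapshot does not copy data: each region first flushes its memstore to an hfile, and the SnapshotManifest then records only references to those files. A client can trigger the same per-region flush explicitly with Admin.flush; a small sketch under the same cluster assumptions as above.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushBeforeSnapshot {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Forces every region of the table to write its memstore to an hfile,
      // the same per-region flush the SnapshotRegionProcedure performs above.
      admin.flush(TableName.valueOf("testExportExpiredSnapshot"));
    }
  }
}
```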
2024-11-22T06:04:18,826 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=149 2024-11-22T06:04:18,826 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testExportExpiredSnapshot/bcf300d151c9df6bfc8b093b51457a6c/.tmp/cf/6c0f2475fab949dfa726a6bb30c5584f as hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testExportExpiredSnapshot/bcf300d151c9df6bfc8b093b51457a6c/cf/6c0f2475fab949dfa726a6bb30c5584f 2024-11-22T06:04:18,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.HMaster(4169): Remote procedure done, pid=149 2024-11-22T06:04:18,826 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportExpiredSnapshot on region c1407c3a5417180f7dcb0e325387e8f9 2024-11-22T06:04:18,827 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=149, ppid=147, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c1407c3a5417180f7dcb0e325387e8f9 2024-11-22T06:04:18,831 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testExportExpiredSnapshot/bcf300d151c9df6bfc8b093b51457a6c/cf/6c0f2475fab949dfa726a6bb30c5584f, entries=5, sequenceid=5, filesize=5.3 K 2024-11-22T06:04:18,832 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=149, ppid=147, state=SUCCESS, hasLock=false; SnapshotRegionProcedure c1407c3a5417180f7dcb0e325387e8f9 in 220 msec 2024-11-22T06:04:18,832 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.HRegion(3140): Finished flush of dataSize ~333 B/333, heapSize ~960 B/960, currentSize=0 B/0 for bcf300d151c9df6bfc8b093b51457a6c in 68ms, sequenceid=5, compaction requested=false 2024-11-22T06:04:18,832 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.HRegion(2603): Flush status journal for bcf300d151c9df6bfc8b093b51457a6c: 2024-11-22T06:04:18,832 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportExpiredSnapshot,,1732255457883.bcf300d151c9df6bfc8b093b51457a6c. for snapshot-testExportExpiredSnapshot completed. 2024-11-22T06:04:18,832 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] snapshot.SnapshotManifest(241): Storing 'testExportExpiredSnapshot,,1732255457883.bcf300d151c9df6bfc8b093b51457a6c.' 
region-info for snapshot=snapshot-testExportExpiredSnapshot 2024-11-22T06:04:18,832 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-22T06:04:18,833 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testExportExpiredSnapshot/bcf300d151c9df6bfc8b093b51457a6c/cf/6c0f2475fab949dfa726a6bb30c5584f] hfiles 2024-11-22T06:04:18,833 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testExportExpiredSnapshot/bcf300d151c9df6bfc8b093b51457a6c/cf/6c0f2475fab949dfa726a6bb30c5584f for snapshot=snapshot-testExportExpiredSnapshot 2024-11-22T06:04:18,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742164_1340 (size=103) 2024-11-22T06:04:18,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742164_1340 (size=103) 2024-11-22T06:04:18,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742164_1340 (size=103) 2024-11-22T06:04:18,840 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportExpiredSnapshot,,1732255457883.bcf300d151c9df6bfc8b093b51457a6c. 
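With both regions' hfile references recorded, the master consolidates and verifies the manifest and the snapshot becomes visible to clients. A quick, illustrative way to confirm it is registered is to list snapshots through the Admin API:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public class ListSnapshots {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Prints every completed snapshot known to the master, including
      // snapshot-testExportExpiredSnapshot once procedure pid=147 finishes.
      for (SnapshotDescription sd : admin.listSnapshots()) {
        System.out.println(sd.getName());
      }
    }
  }
}
```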
2024-11-22T06:04:18,840 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=148 2024-11-22T06:04:18,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.HMaster(4169): Remote procedure done, pid=148 2024-11-22T06:04:18,841 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportExpiredSnapshot on region bcf300d151c9df6bfc8b093b51457a6c 2024-11-22T06:04:18,841 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=148, ppid=147, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure bcf300d151c9df6bfc8b093b51457a6c 2024-11-22T06:04:18,844 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=148, resume processing ppid=147 2024-11-22T06:04:18,844 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-22T06:04:18,844 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=148, ppid=147, state=SUCCESS, hasLock=false; SnapshotRegionProcedure bcf300d151c9df6bfc8b093b51457a6c in 232 msec 2024-11-22T06:04:18,845 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-22T06:04:18,845 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-22T06:04:18,845 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snapshot-testExportExpiredSnapshot 2024-11-22T06:04:18,846 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot 2024-11-22T06:04:18,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742165_1341 (size=609) 2024-11-22T06:04:18,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742165_1341 (size=609) 2024-11-22T06:04:18,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742165_1341 (size=609) 2024-11-22T06:04:18,863 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-22T06:04:18,868 INFO [PEWorker-4 {}] 
procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-22T06:04:18,868 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot to hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/snapshot-testExportExpiredSnapshot 2024-11-22T06:04:18,869 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_POST_OPERATION 2024-11-22T06:04:18,870 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }, snapshot procedure id = 147 2024-11-22T06:04:18,871 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=147, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } in 289 msec 2024-11-22T06:04:18,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=147 2024-11-22T06:04:18,909 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testExportExpiredSnapshot completed 2024-11-22T06:04:19,899 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732255328437_0006_000001 (auth:SIMPLE) from 127.0.0.1:43698 2024-11-22T06:04:19,910 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1754703613/yarn-5946224054/MiniMRCluster_1754703613-localDir-nm-0_2/usercache/jenkins/appcache/application_1732255328437_0006/container_1732255328437_0006_01_000001/launch_container.sh] 2024-11-22T06:04:19,911 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1754703613/yarn-5946224054/MiniMRCluster_1754703613-localDir-nm-0_2/usercache/jenkins/appcache/application_1732255328437_0006/container_1732255328437_0006_01_000001/container_tokens] 2024-11-22T06:04:19,911 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1754703613/yarn-5946224054/MiniMRCluster_1754703613-localDir-nm-0_2/usercache/jenkins/appcache/application_1732255328437_0006/container_1732255328437_0006_01_000001/sysfs] 2024-11-22T06:04:21,315 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried 
hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-22T06:04:21,345 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot 2024-11-22T06:04:21,345 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot Metrics about Tables on a single HBase RegionServer 2024-11-22T06:04:21,345 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot 2024-11-22T06:04:21,345 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot Metrics about Tables on a single HBase RegionServer 2024-11-22T06:04:21,346 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-22T06:04:21,346 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion 2024-11-22T06:04:26,848 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-22T06:04:28,916 INFO [Time-limited test {}] snapshot.TestExportSnapshot(467): HDFS export destination path: hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/export-test/export-1732255468916 2024-11-22T06:04:28,917 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=hdfs://localhost:33847, tgtDir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/export-test/export-1732255468916, rawTgtDir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/export-test/export-1732255468916, srcFsUri=hdfs://localhost:33847, srcDir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788 2024-11-22T06:04:28,945 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:33847, inputRoot=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788 2024-11-22T06:04:28,945 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-2145612548_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/export-test/export-1732255468916, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/export-test/export-1732255468916/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot 2024-11-22T06:04:28,947 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 2024-11-22T06:04:28,949 ERROR [Time-limited test {}] util.AbstractHBaseTool(152): Error running command-line tool org.apache.hadoop.hbase.snapshot.SnapshotTTLExpiredException: TTL for snapshot 'snapshot-testExportExpiredSnapshot' has already expired. 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot.verifySnapshot(ExportSnapshot.java:951) ~[classes/:?] at org.apache.hadoop.hbase.snapshot.ExportSnapshot.doWork(ExportSnapshot.java:1096) ~[classes/:?] at org.apache.hadoop.hbase.util.AbstractHBaseTool.run(AbstractHBaseTool.java:150) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:82) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.runExportSnapshot(TestExportSnapshot.java:522) ~[test-classes/:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportExpiredSnapshot(TestExportSnapshot.java:314) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
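The export at 06:04:28 runs roughly ten seconds after the snapshot was taken at 06:04:18, so ExportSnapshot.verifySnapshot rejects it against the ttl=10 recorded earlier; AbstractHBaseTool logs the SnapshotTTLExpiredException and returns a failure code rather than propagating it. Below is a sketch of driving the same tool programmatically, mirroring the ToolRunner path visible in the stack trace; the -copy-to destination is illustrative.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportExpiredSnapshot {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Same entry point the test exercises via AbstractHBaseTool.run/ToolRunner.run.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snapshot-testExportExpiredSnapshot",
        "-copy-to", "hdfs://localhost:33847/user/jenkins/export-test" // illustrative target
    });
    // When creationTime + TTL is already in the past, doWork() hits
    // SnapshotTTLExpiredException during the verification step, the tool logs
    // "Error running command-line tool", and a non-zero exit code is returned.
    System.out.println("ExportSnapshot exit code: " + rc);
  }
}
```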
2024-11-22T06:04:28,950 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportExpiredSnapshot 2024-11-22T06:04:28,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] procedure2.ProcedureExecutor(1139): Stored pid=150, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-22T06:04:28,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=150 2024-11-22T06:04:28,954 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732255468953"}]},"ts":"1732255468953"} 2024-11-22T06:04:28,955 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=DISABLING in hbase:meta 2024-11-22T06:04:28,956 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set testtb-testExportExpiredSnapshot to state=DISABLING 2024-11-22T06:04:28,956 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=151, ppid=150, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportExpiredSnapshot}] 2024-11-22T06:04:28,958 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=152, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=bade74021296d5cb036873466d5300bd, UNASSIGN}, {pid=153, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=741a7efeeb2abbf2cefdb07f109c09a8, UNASSIGN}] 2024-11-22T06:04:28,959 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=153, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=741a7efeeb2abbf2cefdb07f109c09a8, UNASSIGN 2024-11-22T06:04:28,959 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=152, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=bade74021296d5cb036873466d5300bd, UNASSIGN 2024-11-22T06:04:28,960 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=153 updating hbase:meta row=741a7efeeb2abbf2cefdb07f109c09a8, regionState=CLOSING, regionLocation=407411ea95db,35283,1732255321784 2024-11-22T06:04:28,960 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=152 updating hbase:meta row=bade74021296d5cb036873466d5300bd, regionState=CLOSING, regionLocation=407411ea95db,44619,1732255321573 2024-11-22T06:04:28,961 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=153, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=741a7efeeb2abbf2cefdb07f109c09a8, UNASSIGN because future has completed 2024-11-22T06:04:28,962 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-22T06:04:28,962 INFO [PEWorker-4 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=154, ppid=153, state=RUNNABLE, hasLock=false; CloseRegionProcedure 741a7efeeb2abbf2cefdb07f109c09a8, server=407411ea95db,35283,1732255321784}] 2024-11-22T06:04:28,962 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=152, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=bade74021296d5cb036873466d5300bd, UNASSIGN because future has completed 2024-11-22T06:04:28,963 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-22T06:04:28,963 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=155, ppid=152, state=RUNNABLE, hasLock=false; CloseRegionProcedure bade74021296d5cb036873466d5300bd, server=407411ea95db,44619,1732255321573}] 2024-11-22T06:04:29,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=150 2024-11-22T06:04:29,114 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] handler.UnassignRegionHandler(122): Close bade74021296d5cb036873466d5300bd 2024-11-22T06:04:29,115 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] handler.UnassignRegionHandler(122): Close 741a7efeeb2abbf2cefdb07f109c09a8 2024-11-22T06:04:29,115 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-22T06:04:29,115 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-22T06:04:29,115 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1722): Closing bade74021296d5cb036873466d5300bd, disabling compactions & flushes 2024-11-22T06:04:29,115 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] regionserver.HRegion(1722): Closing 741a7efeeb2abbf2cefdb07f109c09a8, disabling compactions & flushes 2024-11-22T06:04:29,115 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,1,1732255456535.741a7efeeb2abbf2cefdb07f109c09a8. 2024-11-22T06:04:29,115 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,,1732255456535.bade74021296d5cb036873466d5300bd. 2024-11-22T06:04:29,115 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,1,1732255456535.741a7efeeb2abbf2cefdb07f109c09a8. 2024-11-22T06:04:29,115 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,,1732255456535.bade74021296d5cb036873466d5300bd. 
2024-11-22T06:04:29,115 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,1,1732255456535.741a7efeeb2abbf2cefdb07f109c09a8. after waiting 0 ms 2024-11-22T06:04:29,115 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,,1732255456535.bade74021296d5cb036873466d5300bd. after waiting 0 ms 2024-11-22T06:04:29,115 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,1,1732255456535.741a7efeeb2abbf2cefdb07f109c09a8. 2024-11-22T06:04:29,115 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,,1732255456535.bade74021296d5cb036873466d5300bd. 2024-11-22T06:04:29,121 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportExpiredSnapshot/741a7efeeb2abbf2cefdb07f109c09a8/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-22T06:04:29,121 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-22T06:04:29,121 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,1,1732255456535.741a7efeeb2abbf2cefdb07f109c09a8. 
2024-11-22T06:04:29,121 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] regionserver.HRegion(1676): Region close journal for 741a7efeeb2abbf2cefdb07f109c09a8: Waiting for close lock at 1732255469115Running coprocessor pre-close hooks at 1732255469115Disabling compacts and flushes for region at 1732255469115Disabling writes for close at 1732255469115Writing region close event to WAL at 1732255469115Running coprocessor post-close hooks at 1732255469121 (+6 ms)Closed at 1732255469121 2024-11-22T06:04:29,123 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] handler.UnassignRegionHandler(157): Closed 741a7efeeb2abbf2cefdb07f109c09a8 2024-11-22T06:04:29,124 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=153 updating hbase:meta row=741a7efeeb2abbf2cefdb07f109c09a8, regionState=CLOSED 2024-11-22T06:04:29,126 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=154, ppid=153, state=RUNNABLE, hasLock=false; CloseRegionProcedure 741a7efeeb2abbf2cefdb07f109c09a8, server=407411ea95db,35283,1732255321784 because future has completed 2024-11-22T06:04:29,129 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=154, resume processing ppid=153 2024-11-22T06:04:29,129 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=154, ppid=153, state=SUCCESS, hasLock=false; CloseRegionProcedure 741a7efeeb2abbf2cefdb07f109c09a8, server=407411ea95db,35283,1732255321784 in 165 msec 2024-11-22T06:04:29,130 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=153, ppid=151, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=741a7efeeb2abbf2cefdb07f109c09a8, UNASSIGN in 171 msec 2024-11-22T06:04:29,135 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportExpiredSnapshot/bade74021296d5cb036873466d5300bd/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-22T06:04:29,136 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-22T06:04:29,136 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,,1732255456535.bade74021296d5cb036873466d5300bd. 
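The RS_CLOSE_REGION handlers above are the per-region work spawned by the DisableTableProcedure stored as pid=150 at 06:04:28,950; from a client this whole sequence is a single Admin call. A minimal sketch, assuming the same cluster configuration:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportExpiredSnapshot");
      // Submits a DisableTableProcedure; each region is unassigned and closed,
      // which is what the RS_CLOSE_REGION handlers log above.
      admin.disableTable(table);
      System.out.println("disabled=" + admin.isTableDisabled(table));
    }
  }
}
```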
2024-11-22T06:04:29,136 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1676): Region close journal for bade74021296d5cb036873466d5300bd: Waiting for close lock at 1732255469115Running coprocessor pre-close hooks at 1732255469115Disabling compacts and flushes for region at 1732255469115Disabling writes for close at 1732255469115Writing region close event to WAL at 1732255469116 (+1 ms)Running coprocessor post-close hooks at 1732255469136 (+20 ms)Closed at 1732255469136 2024-11-22T06:04:29,138 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] handler.UnassignRegionHandler(157): Closed bade74021296d5cb036873466d5300bd 2024-11-22T06:04:29,138 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=152 updating hbase:meta row=bade74021296d5cb036873466d5300bd, regionState=CLOSED 2024-11-22T06:04:29,141 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=155, ppid=152, state=RUNNABLE, hasLock=false; CloseRegionProcedure bade74021296d5cb036873466d5300bd, server=407411ea95db,44619,1732255321573 because future has completed 2024-11-22T06:04:29,145 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=155, resume processing ppid=152 2024-11-22T06:04:29,145 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=155, ppid=152, state=SUCCESS, hasLock=false; CloseRegionProcedure bade74021296d5cb036873466d5300bd, server=407411ea95db,44619,1732255321573 in 179 msec 2024-11-22T06:04:29,147 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=152, resume processing ppid=151 2024-11-22T06:04:29,147 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=152, ppid=151, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=bade74021296d5cb036873466d5300bd, UNASSIGN in 187 msec 2024-11-22T06:04:29,149 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=151, resume processing ppid=150 2024-11-22T06:04:29,149 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=151, ppid=150, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportExpiredSnapshot in 192 msec 2024-11-22T06:04:29,151 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732255469151"}]},"ts":"1732255469151"} 2024-11-22T06:04:29,157 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=DISABLED in hbase:meta 2024-11-22T06:04:29,157 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testExportExpiredSnapshot to state=DISABLED 2024-11-22T06:04:29,159 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=150, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportExpiredSnapshot in 208 msec 2024-11-22T06:04:29,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=150 2024-11-22T06:04:29,269 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-11-22T06:04:29,270 INFO 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportExpiredSnapshot 2024-11-22T06:04:29,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] procedure2.ProcedureExecutor(1139): Stored pid=156, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-22T06:04:29,272 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=156, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-22T06:04:29,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportExpiredSnapshot 2024-11-22T06:04:29,273 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=156, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-22T06:04:29,275 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41799 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportExpiredSnapshot 2024-11-22T06:04:29,277 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportExpiredSnapshot/bade74021296d5cb036873466d5300bd 2024-11-22T06:04:29,277 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportExpiredSnapshot/741a7efeeb2abbf2cefdb07f109c09a8 2024-11-22T06:04:29,278 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportExpiredSnapshot/bade74021296d5cb036873466d5300bd/cf, FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportExpiredSnapshot/bade74021296d5cb036873466d5300bd/recovered.edits] 2024-11-22T06:04:29,278 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportExpiredSnapshot/741a7efeeb2abbf2cefdb07f109c09a8/cf, FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportExpiredSnapshot/741a7efeeb2abbf2cefdb07f109c09a8/recovered.edits] 2024-11-22T06:04:29,283 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportExpiredSnapshot/741a7efeeb2abbf2cefdb07f109c09a8/cf/389ee1bde5024f7eb2f7faebd1960d53 to hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/archive/data/default/testtb-testExportExpiredSnapshot/741a7efeeb2abbf2cefdb07f109c09a8/cf/389ee1bde5024f7eb2f7faebd1960d53 2024-11-22T06:04:29,283 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportExpiredSnapshot/bade74021296d5cb036873466d5300bd/cf/2ac6343c876e4b0d8815ecab92f7de06 to 
hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/archive/data/default/testtb-testExportExpiredSnapshot/bade74021296d5cb036873466d5300bd/cf/2ac6343c876e4b0d8815ecab92f7de06 2024-11-22T06:04:29,286 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportExpiredSnapshot/bade74021296d5cb036873466d5300bd/recovered.edits/9.seqid to hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/archive/data/default/testtb-testExportExpiredSnapshot/bade74021296d5cb036873466d5300bd/recovered.edits/9.seqid 2024-11-22T06:04:29,286 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportExpiredSnapshot/bade74021296d5cb036873466d5300bd 2024-11-22T06:04:29,286 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportExpiredSnapshot/741a7efeeb2abbf2cefdb07f109c09a8/recovered.edits/9.seqid to hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/archive/data/default/testtb-testExportExpiredSnapshot/741a7efeeb2abbf2cefdb07f109c09a8/recovered.edits/9.seqid 2024-11-22T06:04:29,287 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportExpiredSnapshot/741a7efeeb2abbf2cefdb07f109c09a8 2024-11-22T06:04:29,287 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportExpiredSnapshot regions 2024-11-22T06:04:29,289 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=156, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-22T06:04:29,292 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportExpiredSnapshot from hbase:meta 2024-11-22T06:04:29,653 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-22T06:04:29,653 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44619-0x101626c19a90001, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-22T06:04:29,653 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41799-0x101626c19a90002, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-22T06:04:29,653 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35283-0x101626c19a90003, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-22T06:04:29,654 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 
2024-11-22T06:04:29,654 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-11-22T06:04:29,654 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-11-22T06:04:29,655 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportExpiredSnapshot' descriptor. 2024-11-22T06:04:29,657 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=156, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-22T06:04:29,657 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportExpiredSnapshot' from region states. 2024-11-22T06:04:29,657 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot,,1732255456535.bade74021296d5cb036873466d5300bd.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732255469657"}]},"ts":"9223372036854775807"} 2024-11-22T06:04:29,657 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot,1,1732255456535.741a7efeeb2abbf2cefdb07f109c09a8.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732255469657"}]},"ts":"9223372036854775807"} 2024-11-22T06:04:29,660 INFO [PEWorker-5 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-22T06:04:29,660 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => bade74021296d5cb036873466d5300bd, NAME => 'testtb-testExportExpiredSnapshot,,1732255456535.bade74021296d5cb036873466d5300bd.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 741a7efeeb2abbf2cefdb07f109c09a8, NAME => 'testtb-testExportExpiredSnapshot,1,1732255456535.741a7efeeb2abbf2cefdb07f109c09a8.', STARTKEY => '1', ENDKEY => ''}] 2024-11-22T06:04:29,660 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportExpiredSnapshot' as deleted. 
2024-11-22T06:04:29,660 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732255469660"}]},"ts":"9223372036854775807"} 2024-11-22T06:04:29,662 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportExpiredSnapshot state from META 2024-11-22T06:04:29,663 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=156, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-22T06:04:29,664 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=156, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportExpiredSnapshot in 393 msec 2024-11-22T06:04:29,671 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44619-0x101626c19a90001, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:04:29,671 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41799-0x101626c19a90002, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-22T06:04:29,671 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35283-0x101626c19a90003, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-22T06:04:29,671 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-22T06:04:29,671 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41799-0x101626c19a90002, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:04:29,671 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35283-0x101626c19a90003, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:04:29,671 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:04:29,671 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data null 2024-11-22T06:04:29,672 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-11-22T06:04:29,672 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-22T06:04:29,672 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data 
PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-22T06:04:29,672 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-22T06:04:29,673 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-22T06:04:29,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=156 2024-11-22T06:04:29,673 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportExpiredSnapshot 2024-11-22T06:04:29,673 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-11-22T06:04:29,682 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportExpiredSnapshot" type: DISABLED 2024-11-22T06:04:29,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportExpiredSnapshot 2024-11-22T06:04:29,686 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snapshot-testExportExpiredSnapshot" type: DISABLED 2024-11-22T06:04:29,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotManager(381): Deleting snapshot: snapshot-testExportExpiredSnapshot 2024-11-22T06:04:29,690 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportExpiredSnapshot" type: DISABLED 2024-11-22T06:04:29,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportExpiredSnapshot 2024-11-22T06:04:29,709 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportExpiredSnapshot Thread=798 (was 802), OpenFileDescriptor=793 (was 802), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=489 (was 558), ProcessCount=14 (was 16), AvailableMemoryMB=5653 (was 3929) - AvailableMemoryMB LEAK? - 2024-11-22T06:04:29,710 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=798 is superior to 500 2024-11-22T06:04:29,721 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
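Editor's note: the three 'delete name: "..." type: DISABLED' requests handled just above, each followed by "SnapshotManager: Deleting snapshot: <name>", correspond to plain Admin.deleteSnapshot calls. A sketch with the snapshot names from this log:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTestSnapshots {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Snapshot names taken from the log; each call appears on the master as
      // "SnapshotManager: Deleting snapshot: <name>".
      admin.deleteSnapshot("emptySnaptb0-testExportExpiredSnapshot");
      admin.deleteSnapshot("snapshot-testExportExpiredSnapshot");
      admin.deleteSnapshot("snaptb0-testExportExpiredSnapshot");
    }
  }
}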
2024-11-22T06:04:29,727 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testEmptyExportFileSystemState Thread=798, OpenFileDescriptor=793, MaxFileDescriptor=1048576, SystemLoadAverage=489, ProcessCount=14, AvailableMemoryMB=5651 2024-11-22T06:04:29,728 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=798 is superior to 500 2024-11-22T06:04:29,729 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T06:04:29,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] procedure2.ProcedureExecutor(1139): Stored pid=157, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-22T06:04:29,732 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=157, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_PRE_OPERATION 2024-11-22T06:04:29,732 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testEmptyExportFileSystemState" procId is: 157 2024-11-22T06:04:29,732 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T06:04:29,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 2024-11-22T06:04:29,734 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=157, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-22T06:04:29,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742166_1342 (size=412) 2024-11-22T06:04:29,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742166_1342 (size=412) 2024-11-22T06:04:29,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742166_1342 (size=412) 2024-11-22T06:04:29,749 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => dadc2a2cdf2ab46b32b7c1cab9201d3d, NAME => 'testtb-testEmptyExportFileSystemState,,1732255469729.dadc2a2cdf2ab46b32b7c1cab9201d3d.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE 
=> '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788 2024-11-22T06:04:29,751 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 612f8bf9f13cacc84e04c86a0fadb058, NAME => 'testtb-testEmptyExportFileSystemState,1,1732255469729.612f8bf9f13cacc84e04c86a0fadb058.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788 2024-11-22T06:04:29,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742167_1343 (size=73) 2024-11-22T06:04:29,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742167_1343 (size=73) 2024-11-22T06:04:29,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742167_1343 (size=73) 2024-11-22T06:04:29,756 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,,1732255469729.dadc2a2cdf2ab46b32b7c1cab9201d3d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T06:04:29,756 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1722): Closing dadc2a2cdf2ab46b32b7c1cab9201d3d, disabling compactions & flushes 2024-11-22T06:04:29,756 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,,1732255469729.dadc2a2cdf2ab46b32b7c1cab9201d3d. 2024-11-22T06:04:29,756 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,,1732255469729.dadc2a2cdf2ab46b32b7c1cab9201d3d. 2024-11-22T06:04:29,756 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,,1732255469729.dadc2a2cdf2ab46b32b7c1cab9201d3d. after waiting 0 ms 2024-11-22T06:04:29,756 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,,1732255469729.dadc2a2cdf2ab46b32b7c1cab9201d3d. 2024-11-22T06:04:29,756 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,,1732255469729.dadc2a2cdf2ab46b32b7c1cab9201d3d. 
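Editor's note: the table descriptor printed above (one family 'cf', VERSIONS 1, ROW bloom filter, no compression, 64 KB blocks, two regions split at '1') maps onto the public builder API roughly as follows. This is a sketch of an equivalent createTable call, not the test utility's exact code.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestTable {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testEmptyExportFileSystemState");
      // Column family 'cf' with the attributes shown in the descriptor above.
      TableDescriptor desc = TableDescriptorBuilder.newBuilder(table)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)
              .setBloomFilterType(BloomType.ROW)
              .setCompressionType(Compression.Algorithm.NONE)
              .setBlocksize(64 * 1024)
              .build())
          .build();
      // One split key ('1') yields the two regions created in the log:
      // ['' .. '1') and ['1' .. '').
      admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
    }
  }
}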
2024-11-22T06:04:29,756 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1676): Region close journal for dadc2a2cdf2ab46b32b7c1cab9201d3d: Waiting for close lock at 1732255469756Disabling compacts and flushes for region at 1732255469756Disabling writes for close at 1732255469756Writing region close event to WAL at 1732255469756Closed at 1732255469756 2024-11-22T06:04:29,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742168_1344 (size=73) 2024-11-22T06:04:29,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742168_1344 (size=73) 2024-11-22T06:04:29,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742168_1344 (size=73) 2024-11-22T06:04:29,763 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,1,1732255469729.612f8bf9f13cacc84e04c86a0fadb058.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T06:04:29,763 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1722): Closing 612f8bf9f13cacc84e04c86a0fadb058, disabling compactions & flushes 2024-11-22T06:04:29,763 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,1,1732255469729.612f8bf9f13cacc84e04c86a0fadb058. 2024-11-22T06:04:29,763 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,1,1732255469729.612f8bf9f13cacc84e04c86a0fadb058. 2024-11-22T06:04:29,763 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,1,1732255469729.612f8bf9f13cacc84e04c86a0fadb058. after waiting 0 ms 2024-11-22T06:04:29,763 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,1,1732255469729.612f8bf9f13cacc84e04c86a0fadb058. 2024-11-22T06:04:29,763 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,1,1732255469729.612f8bf9f13cacc84e04c86a0fadb058. 
2024-11-22T06:04:29,763 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1676): Region close journal for 612f8bf9f13cacc84e04c86a0fadb058: Waiting for close lock at 1732255469763Disabling compacts and flushes for region at 1732255469763Disabling writes for close at 1732255469763Writing region close event to WAL at 1732255469763Closed at 1732255469763 2024-11-22T06:04:29,764 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=157, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_ADD_TO_META 2024-11-22T06:04:29,764 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testEmptyExportFileSystemState,,1732255469729.dadc2a2cdf2ab46b32b7c1cab9201d3d.","families":{"info":[{"qualifier":"regioninfo","vlen":72,"tag":[],"timestamp":"1732255469764"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732255469764"}]},"ts":"1732255469764"} 2024-11-22T06:04:29,764 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testEmptyExportFileSystemState,1,1732255469729.612f8bf9f13cacc84e04c86a0fadb058.","families":{"info":[{"qualifier":"regioninfo","vlen":72,"tag":[],"timestamp":"1732255469764"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732255469764"}]},"ts":"1732255469764"} 2024-11-22T06:04:29,767 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-22T06:04:29,768 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=157, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-22T06:04:29,768 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732255469768"}]},"ts":"1732255469768"} 2024-11-22T06:04:29,770 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=ENABLING in hbase:meta 2024-11-22T06:04:29,771 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {407411ea95db=0} racks are {/default-rack=0} 2024-11-22T06:04:29,772 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-22T06:04:29,772 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-22T06:04:29,772 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-22T06:04:29,772 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-22T06:04:29,772 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-22T06:04:29,772 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-22T06:04:29,772 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-22T06:04:29,772 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-22T06:04:29,772 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-22T06:04:29,772 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-22T06:04:29,773 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=158, ppid=157, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=dadc2a2cdf2ab46b32b7c1cab9201d3d, ASSIGN}, {pid=159, ppid=157, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=612f8bf9f13cacc84e04c86a0fadb058, ASSIGN}] 2024-11-22T06:04:29,774 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=158, ppid=157, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=dadc2a2cdf2ab46b32b7c1cab9201d3d, ASSIGN 2024-11-22T06:04:29,774 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=159, ppid=157, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=612f8bf9f13cacc84e04c86a0fadb058, ASSIGN 2024-11-22T06:04:29,775 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=159, ppid=157, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=612f8bf9f13cacc84e04c86a0fadb058, ASSIGN; state=OFFLINE, location=407411ea95db,35283,1732255321784; forceNewPlan=false, retain=false 2024-11-22T06:04:29,775 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=158, ppid=157, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=dadc2a2cdf2ab46b32b7c1cab9201d3d, ASSIGN; state=OFFLINE, location=407411ea95db,44619,1732255321573; forceNewPlan=false, retain=false 2024-11-22T06:04:29,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 2024-11-22T06:04:29,925 INFO [407411ea95db:40429 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
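Editor's note: once the two ASSIGN subprocedures above have chosen target servers, a client can confirm where each region actually landed through RegionLocator. A small, hypothetical verification snippet (the class name is illustrative):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class PrintRegionLocations {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator =
             conn.getRegionLocator(TableName.valueOf("testtb-testEmptyExportFileSystemState"))) {
      // Prints one line per region, e.g. "dadc2a2c... -> 407411ea95db,44619,...".
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
      }
    }
  }
}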
2024-11-22T06:04:29,926 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=159 updating hbase:meta row=612f8bf9f13cacc84e04c86a0fadb058, regionState=OPENING, regionLocation=407411ea95db,35283,1732255321784 2024-11-22T06:04:29,926 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=158 updating hbase:meta row=dadc2a2cdf2ab46b32b7c1cab9201d3d, regionState=OPENING, regionLocation=407411ea95db,44619,1732255321573 2024-11-22T06:04:29,928 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=158, ppid=157, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=dadc2a2cdf2ab46b32b7c1cab9201d3d, ASSIGN because future has completed 2024-11-22T06:04:29,928 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=160, ppid=158, state=RUNNABLE, hasLock=false; OpenRegionProcedure dadc2a2cdf2ab46b32b7c1cab9201d3d, server=407411ea95db,44619,1732255321573}] 2024-11-22T06:04:29,928 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=159, ppid=157, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=612f8bf9f13cacc84e04c86a0fadb058, ASSIGN because future has completed 2024-11-22T06:04:29,931 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=161, ppid=159, state=RUNNABLE, hasLock=false; OpenRegionProcedure 612f8bf9f13cacc84e04c86a0fadb058, server=407411ea95db,35283,1732255321784}] 2024-11-22T06:04:30,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 2024-11-22T06:04:30,087 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] handler.AssignRegionHandler(132): Open testtb-testEmptyExportFileSystemState,,1732255469729.dadc2a2cdf2ab46b32b7c1cab9201d3d. 2024-11-22T06:04:30,087 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(7752): Opening region: {ENCODED => dadc2a2cdf2ab46b32b7c1cab9201d3d, NAME => 'testtb-testEmptyExportFileSystemState,,1732255469729.dadc2a2cdf2ab46b32b7c1cab9201d3d.', STARTKEY => '', ENDKEY => '1'} 2024-11-22T06:04:30,088 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testEmptyExportFileSystemState,,1732255469729.dadc2a2cdf2ab46b32b7c1cab9201d3d. service=AccessControlService 2024-11-22T06:04:30,088 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-22T06:04:30,088 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testEmptyExportFileSystemState dadc2a2cdf2ab46b32b7c1cab9201d3d 2024-11-22T06:04:30,088 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,,1732255469729.dadc2a2cdf2ab46b32b7c1cab9201d3d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T06:04:30,088 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(7794): checking encryption for dadc2a2cdf2ab46b32b7c1cab9201d3d 2024-11-22T06:04:30,088 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(7797): checking classloading for dadc2a2cdf2ab46b32b7c1cab9201d3d 2024-11-22T06:04:30,090 INFO [StoreOpener-dadc2a2cdf2ab46b32b7c1cab9201d3d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region dadc2a2cdf2ab46b32b7c1cab9201d3d 2024-11-22T06:04:30,093 INFO [StoreOpener-dadc2a2cdf2ab46b32b7c1cab9201d3d-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region dadc2a2cdf2ab46b32b7c1cab9201d3d columnFamilyName cf 2024-11-22T06:04:30,093 DEBUG [StoreOpener-dadc2a2cdf2ab46b32b7c1cab9201d3d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T06:04:30,095 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] handler.AssignRegionHandler(132): Open testtb-testEmptyExportFileSystemState,1,1732255469729.612f8bf9f13cacc84e04c86a0fadb058. 2024-11-22T06:04:30,095 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(7752): Opening region: {ENCODED => 612f8bf9f13cacc84e04c86a0fadb058, NAME => 'testtb-testEmptyExportFileSystemState,1,1732255469729.612f8bf9f13cacc84e04c86a0fadb058.', STARTKEY => '1', ENDKEY => ''} 2024-11-22T06:04:30,095 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testEmptyExportFileSystemState,1,1732255469729.612f8bf9f13cacc84e04c86a0fadb058. 
service=AccessControlService 2024-11-22T06:04:30,096 INFO [StoreOpener-dadc2a2cdf2ab46b32b7c1cab9201d3d-1 {}] regionserver.HStore(327): Store=dadc2a2cdf2ab46b32b7c1cab9201d3d/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T06:04:30,096 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-22T06:04:30,096 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testEmptyExportFileSystemState 612f8bf9f13cacc84e04c86a0fadb058 2024-11-22T06:04:30,096 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(1038): replaying wal for dadc2a2cdf2ab46b32b7c1cab9201d3d 2024-11-22T06:04:30,096 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,1,1732255469729.612f8bf9f13cacc84e04c86a0fadb058.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T06:04:30,096 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(7794): checking encryption for 612f8bf9f13cacc84e04c86a0fadb058 2024-11-22T06:04:30,096 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(7797): checking classloading for 612f8bf9f13cacc84e04c86a0fadb058 2024-11-22T06:04:30,097 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testEmptyExportFileSystemState/dadc2a2cdf2ab46b32b7c1cab9201d3d 2024-11-22T06:04:30,098 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testEmptyExportFileSystemState/dadc2a2cdf2ab46b32b7c1cab9201d3d 2024-11-22T06:04:30,098 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(1048): stopping wal replay for dadc2a2cdf2ab46b32b7c1cab9201d3d 2024-11-22T06:04:30,098 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(1060): Cleaning up temporary data for dadc2a2cdf2ab46b32b7c1cab9201d3d 2024-11-22T06:04:30,101 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(1093): writing seq id for dadc2a2cdf2ab46b32b7c1cab9201d3d 2024-11-22T06:04:30,103 INFO [StoreOpener-612f8bf9f13cacc84e04c86a0fadb058-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 612f8bf9f13cacc84e04c86a0fadb058 2024-11-22T06:04:30,103 DEBUG 
[RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testEmptyExportFileSystemState/dadc2a2cdf2ab46b32b7c1cab9201d3d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T06:04:30,104 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(1114): Opened dadc2a2cdf2ab46b32b7c1cab9201d3d; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71185374, jitterRate=0.06074473261833191}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-22T06:04:30,104 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(1122): Running coprocessor post-open hooks for dadc2a2cdf2ab46b32b7c1cab9201d3d 2024-11-22T06:04:30,105 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(1006): Region open journal for dadc2a2cdf2ab46b32b7c1cab9201d3d: Running coprocessor pre-open hook at 1732255470088Writing region info on filesystem at 1732255470088Initializing all the Stores at 1732255470089 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732255470089Cleaning up temporary data from old regions at 1732255470098 (+9 ms)Running coprocessor post-open hooks at 1732255470104 (+6 ms)Region opened successfully at 1732255470105 (+1 ms) 2024-11-22T06:04:30,107 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testEmptyExportFileSystemState,,1732255469729.dadc2a2cdf2ab46b32b7c1cab9201d3d., pid=160, masterSystemTime=1732255470082 2024-11-22T06:04:30,108 INFO [StoreOpener-612f8bf9f13cacc84e04c86a0fadb058-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 612f8bf9f13cacc84e04c86a0fadb058 columnFamilyName cf 2024-11-22T06:04:30,108 DEBUG [StoreOpener-612f8bf9f13cacc84e04c86a0fadb058-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T06:04:30,109 INFO [StoreOpener-612f8bf9f13cacc84e04c86a0fadb058-1 {}] regionserver.HStore(327): Store=612f8bf9f13cacc84e04c86a0fadb058/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T06:04:30,111 DEBUG 
[RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(1038): replaying wal for 612f8bf9f13cacc84e04c86a0fadb058 2024-11-22T06:04:30,114 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testEmptyExportFileSystemState/612f8bf9f13cacc84e04c86a0fadb058 2024-11-22T06:04:30,115 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testEmptyExportFileSystemState/612f8bf9f13cacc84e04c86a0fadb058 2024-11-22T06:04:30,115 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testEmptyExportFileSystemState,,1732255469729.dadc2a2cdf2ab46b32b7c1cab9201d3d. 2024-11-22T06:04:30,115 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] handler.AssignRegionHandler(153): Opened testtb-testEmptyExportFileSystemState,,1732255469729.dadc2a2cdf2ab46b32b7c1cab9201d3d. 2024-11-22T06:04:30,116 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(1048): stopping wal replay for 612f8bf9f13cacc84e04c86a0fadb058 2024-11-22T06:04:30,116 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=158 updating hbase:meta row=dadc2a2cdf2ab46b32b7c1cab9201d3d, regionState=OPEN, openSeqNum=2, regionLocation=407411ea95db,44619,1732255321573 2024-11-22T06:04:30,116 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(1060): Cleaning up temporary data for 612f8bf9f13cacc84e04c86a0fadb058 2024-11-22T06:04:30,119 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(1093): writing seq id for 612f8bf9f13cacc84e04c86a0fadb058 2024-11-22T06:04:30,119 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=160, ppid=158, state=RUNNABLE, hasLock=true; OpenRegionProcedure dadc2a2cdf2ab46b32b7c1cab9201d3d, server=407411ea95db,44619,1732255321573 2024-11-22T06:04:30,124 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=160, resume processing ppid=158 2024-11-22T06:04:30,124 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=160, ppid=158, state=SUCCESS, hasLock=false; OpenRegionProcedure dadc2a2cdf2ab46b32b7c1cab9201d3d, server=407411ea95db,44619,1732255321573 in 191 msec 2024-11-22T06:04:30,127 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=158, ppid=157, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=dadc2a2cdf2ab46b32b7c1cab9201d3d, ASSIGN in 351 msec 2024-11-22T06:04:30,131 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testEmptyExportFileSystemState/612f8bf9f13cacc84e04c86a0fadb058/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 
2024-11-22T06:04:30,132 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(1114): Opened 612f8bf9f13cacc84e04c86a0fadb058; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68030245, jitterRate=0.013729646801948547}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-22T06:04:30,132 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 612f8bf9f13cacc84e04c86a0fadb058 2024-11-22T06:04:30,133 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(1006): Region open journal for 612f8bf9f13cacc84e04c86a0fadb058: Running coprocessor pre-open hook at 1732255470096Writing region info on filesystem at 1732255470096Initializing all the Stores at 1732255470097 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732255470097Cleaning up temporary data from old regions at 1732255470116 (+19 ms)Running coprocessor post-open hooks at 1732255470132 (+16 ms)Region opened successfully at 1732255470132 2024-11-22T06:04:30,134 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testEmptyExportFileSystemState,1,1732255469729.612f8bf9f13cacc84e04c86a0fadb058., pid=161, masterSystemTime=1732255470085 2024-11-22T06:04:30,136 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testEmptyExportFileSystemState,1,1732255469729.612f8bf9f13cacc84e04c86a0fadb058. 2024-11-22T06:04:30,136 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] handler.AssignRegionHandler(153): Opened testtb-testEmptyExportFileSystemState,1,1732255469729.612f8bf9f13cacc84e04c86a0fadb058. 
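Editor's note: with both OpenRegionProcedures finished above, the table is usable from the client side. The test relies on HBaseTestingUtil's own wait helpers (see the "Waiting until all regions ... get assigned" lines below); a plain-client equivalent, sketched under the assumption of the same 60 s timeout, would poll Admin.isTableAvailable:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class WaitForTable {
  public static void main(String[] args) throws IOException, InterruptedException {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("testtb-testEmptyExportFileSystemState");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      long deadline = System.currentTimeMillis() + 60_000;  // 60 s, matching the test's timeout
      while (!admin.isTableAvailable(table)) {
        if (System.currentTimeMillis() > deadline) {
          throw new IOException("Table " + table + " not available within 60 s");
        }
        Thread.sleep(250);  // poll until all regions are open
      }
    }
  }
}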
2024-11-22T06:04:30,137 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=159 updating hbase:meta row=612f8bf9f13cacc84e04c86a0fadb058, regionState=OPEN, openSeqNum=2, regionLocation=407411ea95db,35283,1732255321784 2024-11-22T06:04:30,138 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=161, ppid=159, state=RUNNABLE, hasLock=false; OpenRegionProcedure 612f8bf9f13cacc84e04c86a0fadb058, server=407411ea95db,35283,1732255321784 because future has completed 2024-11-22T06:04:30,142 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=161, resume processing ppid=159 2024-11-22T06:04:30,142 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=161, ppid=159, state=SUCCESS, hasLock=false; OpenRegionProcedure 612f8bf9f13cacc84e04c86a0fadb058, server=407411ea95db,35283,1732255321784 in 208 msec 2024-11-22T06:04:30,147 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=159, resume processing ppid=157 2024-11-22T06:04:30,147 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=159, ppid=157, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=612f8bf9f13cacc84e04c86a0fadb058, ASSIGN in 369 msec 2024-11-22T06:04:30,148 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=157, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-22T06:04:30,148 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732255470148"}]},"ts":"1732255470148"} 2024-11-22T06:04:30,150 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=ENABLED in hbase:meta 2024-11-22T06:04:30,151 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=157, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_POST_OPERATION 2024-11-22T06:04:30,151 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testEmptyExportFileSystemState jenkins: RWXCA 2024-11-22T06:04:30,155 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41799 {}] access.PermissionStorage(613): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-11-22T06:04:30,195 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35283-0x101626c19a90003, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:04:30,195 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:04:30,195 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44619-0x101626c19a90001, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:04:30,195 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41799-0x101626c19a90002, 
quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:04:30,204 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-22T06:04:30,204 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-22T06:04:30,204 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-22T06:04:30,204 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-22T06:04:30,205 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-22T06:04:30,205 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-22T06:04:30,205 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-22T06:04:30,205 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-22T06:04:30,206 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=157, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testEmptyExportFileSystemState in 474 msec 2024-11-22T06:04:30,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 2024-11-22T06:04:30,360 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-11-22T06:04:30,360 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testEmptyExportFileSystemState get assigned. Timeout = 60000ms 2024-11-22T06:04:30,361 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-22T06:04:30,366 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testEmptyExportFileSystemState assigned to meta. 
Checking AM states. 2024-11-22T06:04:30,366 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-22T06:04:30,366 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testEmptyExportFileSystemState assigned. 2024-11-22T06:04:30,366 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-22T06:04:30,371 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-11-22T06:04:30,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732255470371 (current time:1732255470371). 2024-11-22T06:04:30,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-22T06:04:30,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testEmptyExportFileSystemState VERSION not specified, setting to 2 2024-11-22T06:04:30,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-22T06:04:30,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@511e08f9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:04:30,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.ClusterIdFetcher(90): Going to request 407411ea95db,40429,-1 for getting cluster id 2024-11-22T06:04:30,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T06:04:30,374 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '6eeb2921-6f20-4de7-bcc0-81ebecd4007b' 2024-11-22T06:04:30,374 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T06:04:30,374 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "6eeb2921-6f20-4de7-bcc0-81ebecd4007b" 2024-11-22T06:04:30,375 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@24129f24, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:04:30,375 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [407411ea95db,40429,-1] 2024-11-22T06:04:30,375 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 
{}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T06:04:30,375 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:04:30,377 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41540, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T06:04:30,379 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@73b2f260, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:04:30,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T06:04:30,381 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=407411ea95db,41799,1732255321718, seqNum=-1] 2024-11-22T06:04:30,381 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T06:04:30,383 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52110, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T06:04:30,385 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429. 
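Editor's note: the NodeChildrenChanged and NodeDeleted events on /hbase/acl seen throughout this run come from watches that HBase's ZKPermissionWatcher keeps on that znode; each table grant or table drop touches a child of /hbase/acl. The following is a standalone ZooKeeper sketch of the same kind of child watch, not HBase's internal ZKWatcher code; the quorum address 127.0.0.1:59716 is the one from this log, and the timeouts are arbitrary.

import java.util.List;
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class AclWatchSketch {
  public static void main(String[] args) throws Exception {
    CountDownLatch connected = new CountDownLatch(1);
    // Quorum address taken from the log; session timeout is arbitrary.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:59716", 30_000, event -> {
      if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
        connected.countDown();
      }
      // Creating or dropping a table changes the children of /hbase/acl,
      // which fires a NodeChildrenChanged event like the ones in the log.
      if (event.getType() == Watcher.Event.EventType.NodeChildrenChanged) {
        System.out.println("ACL children changed: " + event.getPath());
      }
    });
    connected.await();
    // 'true' registers the default watcher above as the child watch.
    List<String> tablesWithAcls = zk.getChildren("/hbase/acl", true);
    System.out.println("tables with ACL entries: " + tablesWithAcls);
    Thread.sleep(60_000);  // keep the session open long enough to observe events
    zk.close();
  }
}

Note that ZooKeeper child watches are one-shot: a long-lived watcher has to call getChildren again after each event to re-register, which is why the permission-cache updates in the log recur on every ACL change.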
2024-11-22T06:04:30,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-22T06:04:30,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:04:30,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:04:30,385 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-22T06:04:30,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@188a80f8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:04:30,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.ClusterIdFetcher(90): Going to request 407411ea95db,40429,-1 for getting cluster id 2024-11-22T06:04:30,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T06:04:30,388 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '6eeb2921-6f20-4de7-bcc0-81ebecd4007b' 2024-11-22T06:04:30,389 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T06:04:30,389 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "6eeb2921-6f20-4de7-bcc0-81ebecd4007b" 2024-11-22T06:04:30,389 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3955e14f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:04:30,389 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [407411ea95db,40429,-1] 2024-11-22T06:04:30,389 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T06:04:30,390 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:04:30,392 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41562, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T06:04:30,393 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1098d59a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:04:30,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T06:04:30,395 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=407411ea95db,41799,1732255321718, seqNum=-1] 2024-11-22T06:04:30,396 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T06:04:30,397 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52118, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T06:04:30,399 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testEmptyExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1732255324607.c2a1af531af5dfa747044be7409c1d27., hostname=407411ea95db,41799,1732255321718, seqNum=2] 2024-11-22T06:04:30,402 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429. 
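Editor's note: the ClusterIdFetcher / ConnectionRegistry exchanges above are what happens under the hood when a client builds a Connection. A hedged sketch of the client-visible side, with the quorum address taken from the ZKWatcher lines in this log; if pointed at this cluster it should print the same cluster id ('6eeb2921-...') the registry hands out above.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ShowClusterId {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // Quorum host and client port taken from the log (127.0.0.1:59716).
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.set("hbase.zookeeper.property.clientPort", "59716");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Should print the cluster id the connection registry returned above.
      System.out.println(admin.getClusterMetrics().getClusterId());
    }
  }
}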
2024-11-22T06:04:30,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor220.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-22T06:04:30,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:04:30,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:04:30,402 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-22T06:04:30,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] access.PermissionStorage(613): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-11-22T06:04:30,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
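Editor's note: the "snapshot request for:{ ss=emptySnaptb0-testEmptyExportFileSystemState ... type=FLUSH ttl=0 }" and the ACL validation traced above are all triggered by one client call. A sketch with the names from this log, assuming the FLUSH-type overload of Admin.snapshot:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeEmptySnapshot {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Names taken from the log. A FLUSH snapshot flushes each region before
      // capturing its file list; here the regions are still empty, so the
      // per-region flush below is a no-op and the manifest stores region info only.
      admin.snapshot("emptySnaptb0-testEmptyExportFileSystemState",
          TableName.valueOf("testtb-testEmptyExportFileSystemState"),
          SnapshotType.FLUSH);
    }
  }
}

The SnapshotProcedure stored right after this point, and the per-region SnapshotRegionProcedure subprocedures further down, are the master-side execution of that one request.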
2024-11-22T06:04:30,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] procedure2.ProcedureExecutor(1139): Stored pid=162, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-11-22T06:04:30,407 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-22T06:04:30,408 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-22T06:04:30,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 162 2024-11-22T06:04:30,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=162 2024-11-22T06:04:30,412 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-22T06:04:30,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742169_1345 (size=185) 2024-11-22T06:04:30,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742169_1345 (size=185) 2024-11-22T06:04:30,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742169_1345 (size=185) 2024-11-22T06:04:30,448 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-22T06:04:30,448 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=163, ppid=162, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure dadc2a2cdf2ab46b32b7c1cab9201d3d}, {pid=164, ppid=162, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 612f8bf9f13cacc84e04c86a0fadb058}] 2024-11-22T06:04:30,449 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=163, ppid=162, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure dadc2a2cdf2ab46b32b7c1cab9201d3d 2024-11-22T06:04:30,449 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=164, ppid=162, state=RUNNABLE, hasLock=false; 
SnapshotRegionProcedure 612f8bf9f13cacc84e04c86a0fadb058 2024-11-22T06:04:30,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=162 2024-11-22T06:04:30,610 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35283 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=164 2024-11-22T06:04:30,610 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44619 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=163 2024-11-22T06:04:30,611 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=164}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,1,1732255469729.612f8bf9f13cacc84e04c86a0fadb058. 2024-11-22T06:04:30,611 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=163}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,,1732255469729.dadc2a2cdf2ab46b32b7c1cab9201d3d. 2024-11-22T06:04:30,611 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=163}] regionserver.HRegion(2603): Flush status journal for dadc2a2cdf2ab46b32b7c1cab9201d3d: 2024-11-22T06:04:30,611 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=164}] regionserver.HRegion(2603): Flush status journal for 612f8bf9f13cacc84e04c86a0fadb058: 2024-11-22T06:04:30,611 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=164}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,1,1732255469729.612f8bf9f13cacc84e04c86a0fadb058. for emptySnaptb0-testEmptyExportFileSystemState completed. 2024-11-22T06:04:30,611 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=163}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,,1732255469729.dadc2a2cdf2ab46b32b7c1cab9201d3d. for emptySnaptb0-testEmptyExportFileSystemState completed. 2024-11-22T06:04:30,611 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=164}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,1,1732255469729.612f8bf9f13cacc84e04c86a0fadb058.' region-info for snapshot=emptySnaptb0-testEmptyExportFileSystemState 2024-11-22T06:04:30,611 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=163}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,,1732255469729.dadc2a2cdf2ab46b32b7c1cab9201d3d.' 
region-info for snapshot=emptySnaptb0-testEmptyExportFileSystemState 2024-11-22T06:04:30,611 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=164}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-22T06:04:30,611 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=163}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-22T06:04:30,611 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=164}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-22T06:04:30,611 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=163}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-22T06:04:30,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=162 2024-11-22T06:04:30,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742170_1346 (size=76) 2024-11-22T06:04:30,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742170_1346 (size=76) 2024-11-22T06:04:30,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742170_1346 (size=76) 2024-11-22T06:04:30,765 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=163}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,,1732255469729.dadc2a2cdf2ab46b32b7c1cab9201d3d. 2024-11-22T06:04:30,765 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=163}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=163 2024-11-22T06:04:30,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.HMaster(4169): Remote procedure done, pid=163 2024-11-22T06:04:30,766 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testEmptyExportFileSystemState on region dadc2a2cdf2ab46b32b7c1cab9201d3d 2024-11-22T06:04:30,767 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=163, ppid=162, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure dadc2a2cdf2ab46b32b7c1cab9201d3d 2024-11-22T06:04:30,771 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=163, ppid=162, state=SUCCESS, hasLock=false; SnapshotRegionProcedure dadc2a2cdf2ab46b32b7c1cab9201d3d in 321 msec 2024-11-22T06:04:30,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742171_1347 (size=76) 2024-11-22T06:04:30,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742171_1347 (size=76) 2024-11-22T06:04:30,829 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=164}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,1,1732255469729.612f8bf9f13cacc84e04c86a0fadb058. 
2024-11-22T06:04:30,829 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=164 2024-11-22T06:04:30,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.HMaster(4169): Remote procedure done, pid=164 2024-11-22T06:04:30,830 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testEmptyExportFileSystemState on region 612f8bf9f13cacc84e04c86a0fadb058 2024-11-22T06:04:30,830 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=164, ppid=162, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 612f8bf9f13cacc84e04c86a0fadb058 2024-11-22T06:04:30,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742171_1347 (size=76) 2024-11-22T06:04:30,834 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=164, resume processing ppid=162 2024-11-22T06:04:30,834 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-22T06:04:30,834 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=164, ppid=162, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 612f8bf9f13cacc84e04c86a0fadb058 in 384 msec 2024-11-22T06:04:30,836 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-22T06:04:30,837 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-22T06:04:30,837 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testEmptyExportFileSystemState 2024-11-22T06:04:30,839 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-11-22T06:04:30,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742172_1348 (size=567) 2024-11-22T06:04:30,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742172_1348 (size=567) 2024-11-22T06:04:30,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742172_1348 (size=567) 2024-11-22T06:04:30,901 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, 
hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-22T06:04:30,906 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-22T06:04:30,907 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState to hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-11-22T06:04:30,909 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-22T06:04:30,910 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 162 2024-11-22T06:04:30,912 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=162, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } in 506 msec 2024-11-22T06:04:31,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=162 2024-11-22T06:04:31,040 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-11-22T06:04:31,046 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testEmptyExportFileSystemState', row='05ad0f26b5657a4fa357a22889d5345a1', locateType=CURRENT is [region=testtb-testEmptyExportFileSystemState,,1732255469729.dadc2a2cdf2ab46b32b7c1cab9201d3d., hostname=407411ea95db,44619,1732255321573, seqNum=2] 2024-11-22T06:04:31,050 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testEmptyExportFileSystemState', row='1170a0375aafe0d4f6df22a09b251d681', locateType=CURRENT is [region=testtb-testEmptyExportFileSystemState,1,1732255469729.612f8bf9f13cacc84e04c86a0fadb058., hostname=407411ea95db,35283,1732255321784, seqNum=2] 2024-11-22T06:04:31,052 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testEmptyExportFileSystemState', row='280de0e35ffea43aa26d14e8effeda0d4', locateType=CURRENT is [region=testtb-testEmptyExportFileSystemState,1,1732255469729.612f8bf9f13cacc84e04c86a0fadb058., hostname=407411ea95db,35283,1732255321784, seqNum=2] 2024-11-22T06:04:31,053 DEBUG 
[RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testEmptyExportFileSystemState', row='347add360b64cdf22903e91d42fb15940', locateType=CURRENT is [region=testtb-testEmptyExportFileSystemState,1,1732255469729.612f8bf9f13cacc84e04c86a0fadb058., hostname=407411ea95db,35283,1732255321784, seqNum=2] 2024-11-22T06:04:31,055 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testEmptyExportFileSystemState', row='5a63be8e873ab214f29317a9976b19ebc', locateType=CURRENT is [region=testtb-testEmptyExportFileSystemState,1,1732255469729.612f8bf9f13cacc84e04c86a0fadb058., hostname=407411ea95db,35283,1732255321784, seqNum=2] 2024-11-22T06:04:31,056 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testEmptyExportFileSystemState', row='4bf3c54a03bb5680d1d651ac46851f041', locateType=CURRENT is [region=testtb-testEmptyExportFileSystemState,1,1732255469729.612f8bf9f13cacc84e04c86a0fadb058., hostname=407411ea95db,35283,1732255321784, seqNum=2] 2024-11-22T06:04:31,056 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testEmptyExportFileSystemState', row='6132c7b9bf6e6d534cdc05152b930e2f8', locateType=CURRENT is [region=testtb-testEmptyExportFileSystemState,1,1732255469729.612f8bf9f13cacc84e04c86a0fadb058., hostname=407411ea95db,35283,1732255321784, seqNum=2] 2024-11-22T06:04:31,057 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testEmptyExportFileSystemState', row='bcc93d0c0cee8b1b6523814d84dc4294', locateType=CURRENT is [region=testtb-testEmptyExportFileSystemState,1,1732255469729.612f8bf9f13cacc84e04c86a0fadb058., hostname=407411ea95db,35283,1732255321784, seqNum=2] 2024-11-22T06:04:31,063 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44619 {}] regionserver.HRegion(8528): writing data to region testtb-testEmptyExportFileSystemState,,1732255469729.dadc2a2cdf2ab46b32b7c1cab9201d3d. with WAL disabled. Data may be lost in the event of a crash. 2024-11-22T06:04:31,065 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35283 {}] regionserver.HRegion(8528): writing data to region testtb-testEmptyExportFileSystemState,1,1732255469729.612f8bf9f13cacc84e04c86a0fadb058. with WAL disabled. Data may be lost in the event of a crash. 2024-11-22T06:04:31,067 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-22T06:04:31,072 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testEmptyExportFileSystemState 2024-11-22T06:04:31,072 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testEmptyExportFileSystemState,,1732255469729.dadc2a2cdf2ab46b32b7c1cab9201d3d. 
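The "writing data to region ... with WAL disabled. Data may be lost in the event of a crash." entries above most likely correspond to test writes issued with durability SKIP_WAL. A minimal sketch of such a write against the table from the log; the row key and value are illustrative, while the cf:q column matches the cells shown later in the flush entries:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(
             TableName.valueOf("testtb-testEmptyExportFileSystemState"))) {
      Put put = new Put(Bytes.toBytes("row-0001"));            // illustrative row key
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"),   // cf:q as in the log
          Bytes.toBytes("value"));
      // SKIP_WAL trades durability for speed; the region server then logs the
      // "Data may be lost in the event of a crash" warning seen above.
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);
    }
  }
}
```

With the WAL skipped, the cells live only in the memstore until the FLUSH-type snapshot below forces the regions to flush, which is the point at which they are actually persisted to HFiles.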
2024-11-22T06:04:31,072 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-22T06:04:31,075 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-22T06:04:31,083 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-22T06:04:31,092 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-22T06:04:31,096 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-11-22T06:04:31,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732255471096 (current time:1732255471096). 2024-11-22T06:04:31,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-22T06:04:31,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testEmptyExportFileSystemState VERSION not specified, setting to 2 2024-11-22T06:04:31,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-22T06:04:31,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5b24dc52, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:04:31,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.ClusterIdFetcher(90): Going to request 407411ea95db,40429,-1 for getting cluster id 2024-11-22T06:04:31,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T06:04:31,098 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '6eeb2921-6f20-4de7-bcc0-81ebecd4007b' 2024-11-22T06:04:31,099 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T06:04:31,099 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "6eeb2921-6f20-4de7-bcc0-81ebecd4007b" 2024-11-22T06:04:31,099 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@49393a79, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:04:31,099 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [407411ea95db,40429,-1] 2024-11-22T06:04:31,099 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T06:04:31,100 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:04:31,100 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41578, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T06:04:31,101 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@19f2f6af, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:04:31,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T06:04:31,103 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=407411ea95db,41799,1732255321718, seqNum=-1] 2024-11-22T06:04:31,103 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T06:04:31,104 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52126, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T06:04:31,106 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429. 
2024-11-22T06:04:31,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-22T06:04:31,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:04:31,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:04:31,106 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-22T06:04:31,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@670226b4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:04:31,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.ClusterIdFetcher(90): Going to request 407411ea95db,40429,-1 for getting cluster id 2024-11-22T06:04:31,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T06:04:31,108 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '6eeb2921-6f20-4de7-bcc0-81ebecd4007b' 2024-11-22T06:04:31,108 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T06:04:31,108 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "6eeb2921-6f20-4de7-bcc0-81ebecd4007b" 2024-11-22T06:04:31,109 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2be5da4d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:04:31,109 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [407411ea95db,40429,-1] 2024-11-22T06:04:31,109 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T06:04:31,109 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:04:31,111 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41598, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T06:04:31,112 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1dc829cc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:04:31,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T06:04:31,115 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=407411ea95db,41799,1732255321718, seqNum=-1] 2024-11-22T06:04:31,115 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T06:04:31,117 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52136, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T06:04:31,119 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testEmptyExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1732255324607.c2a1af531af5dfa747044be7409c1d27., hostname=407411ea95db,41799,1732255321718, seqNum=2] 2024-11-22T06:04:31,122 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429. 
2024-11-22T06:04:31,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor220.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-22T06:04:31,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:04:31,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:04:31,122 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-22T06:04:31,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] access.PermissionStorage(613): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-11-22T06:04:31,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-11-22T06:04:31,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] procedure2.ProcedureExecutor(1139): Stored pid=165, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-11-22T06:04:31,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 165 2024-11-22T06:04:31,126 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState 2024-11-22T06:04:31,126 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState Metrics about Tables on a single HBase RegionServer 2024-11-22T06:04:31,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=165 2024-11-22T06:04:31,127 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot 2024-11-22T06:04:31,127 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-22T06:04:31,129 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-22T06:04:31,132 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-22T06:04:31,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742173_1349 (size=180) 2024-11-22T06:04:31,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742173_1349 (size=180) 2024-11-22T06:04:31,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742173_1349 (size=180) 2024-11-22T06:04:31,154 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-22T06:04:31,154 INFO 
[PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=166, ppid=165, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure dadc2a2cdf2ab46b32b7c1cab9201d3d}, {pid=167, ppid=165, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 612f8bf9f13cacc84e04c86a0fadb058}] 2024-11-22T06:04:31,155 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=166, ppid=165, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure dadc2a2cdf2ab46b32b7c1cab9201d3d 2024-11-22T06:04:31,155 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=167, ppid=165, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 612f8bf9f13cacc84e04c86a0fadb058 2024-11-22T06:04:31,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=165 2024-11-22T06:04:31,307 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35283 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=167 2024-11-22T06:04:31,307 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44619 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=166 2024-11-22T06:04:31,308 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,1,1732255469729.612f8bf9f13cacc84e04c86a0fadb058. 2024-11-22T06:04:31,308 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,,1732255469729.dadc2a2cdf2ab46b32b7c1cab9201d3d. 
2024-11-22T06:04:31,308 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.HRegion(2902): Flushing 612f8bf9f13cacc84e04c86a0fadb058 1/1 column families, dataSize=3.13 KB heapSize=7 KB 2024-11-22T06:04:31,308 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.HRegion(2902): Flushing dadc2a2cdf2ab46b32b7c1cab9201d3d 1/1 column families, dataSize=132 B heapSize=544 B 2024-11-22T06:04:31,329 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testEmptyExportFileSystemState/dadc2a2cdf2ab46b32b7c1cab9201d3d/.tmp/cf/eb55e17987c344598e82eead86bc5037 is 71, key is 045a44e64aaa36ec07012becaa9ddcb9/cf:q/1732255471063/Put/seqid=0 2024-11-22T06:04:31,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742174_1350 (size=5216) 2024-11-22T06:04:31,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742174_1350 (size=5216) 2024-11-22T06:04:31,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742174_1350 (size=5216) 2024-11-22T06:04:31,338 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=132 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testEmptyExportFileSystemState/dadc2a2cdf2ab46b32b7c1cab9201d3d/.tmp/cf/eb55e17987c344598e82eead86bc5037 2024-11-22T06:04:31,344 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testEmptyExportFileSystemState/dadc2a2cdf2ab46b32b7c1cab9201d3d/.tmp/cf/eb55e17987c344598e82eead86bc5037 as hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testEmptyExportFileSystemState/dadc2a2cdf2ab46b32b7c1cab9201d3d/cf/eb55e17987c344598e82eead86bc5037 2024-11-22T06:04:31,347 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testEmptyExportFileSystemState/612f8bf9f13cacc84e04c86a0fadb058/.tmp/cf/d72b25def2b94eb7b8973338fdb44bf8 is 71, key is 13b258c5749037d085aa6d1d23f30a87/cf:q/1732255471065/Put/seqid=0 2024-11-22T06:04:31,353 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testEmptyExportFileSystemState/dadc2a2cdf2ab46b32b7c1cab9201d3d/cf/eb55e17987c344598e82eead86bc5037, entries=2, sequenceid=6, filesize=5.1 K 2024-11-22T06:04:31,354 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] 
regionserver.HRegion(3140): Finished flush of dataSize ~132 B/132, heapSize ~528 B/528, currentSize=0 B/0 for dadc2a2cdf2ab46b32b7c1cab9201d3d in 46ms, sequenceid=6, compaction requested=false 2024-11-22T06:04:31,355 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testEmptyExportFileSystemState' 2024-11-22T06:04:31,355 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.HRegion(2603): Flush status journal for dadc2a2cdf2ab46b32b7c1cab9201d3d: 2024-11-22T06:04:31,355 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,,1732255469729.dadc2a2cdf2ab46b32b7c1cab9201d3d. for snaptb0-testEmptyExportFileSystemState completed. 2024-11-22T06:04:31,355 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,,1732255469729.dadc2a2cdf2ab46b32b7c1cab9201d3d.' region-info for snapshot=snaptb0-testEmptyExportFileSystemState 2024-11-22T06:04:31,355 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-22T06:04:31,355 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testEmptyExportFileSystemState/dadc2a2cdf2ab46b32b7c1cab9201d3d/cf/eb55e17987c344598e82eead86bc5037] hfiles 2024-11-22T06:04:31,355 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testEmptyExportFileSystemState/dadc2a2cdf2ab46b32b7c1cab9201d3d/cf/eb55e17987c344598e82eead86bc5037 for snapshot=snaptb0-testEmptyExportFileSystemState 2024-11-22T06:04:31,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742175_1351 (size=8394) 2024-11-22T06:04:31,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742175_1351 (size=8394) 2024-11-22T06:04:31,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742175_1351 (size=8394) 2024-11-22T06:04:31,368 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.13 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testEmptyExportFileSystemState/612f8bf9f13cacc84e04c86a0fadb058/.tmp/cf/d72b25def2b94eb7b8973338fdb44bf8 2024-11-22T06:04:31,375 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testEmptyExportFileSystemState/612f8bf9f13cacc84e04c86a0fadb058/.tmp/cf/d72b25def2b94eb7b8973338fdb44bf8 as hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testEmptyExportFileSystemState/612f8bf9f13cacc84e04c86a0fadb058/cf/d72b25def2b94eb7b8973338fdb44bf8 2024-11-22T06:04:31,387 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testEmptyExportFileSystemState/612f8bf9f13cacc84e04c86a0fadb058/cf/d72b25def2b94eb7b8973338fdb44bf8, entries=48, sequenceid=6, filesize=8.2 K 2024-11-22T06:04:31,388 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.HRegion(3140): Finished flush of dataSize ~3.13 KB/3204, heapSize ~6.98 KB/7152, currentSize=0 B/0 for 612f8bf9f13cacc84e04c86a0fadb058 in 80ms, sequenceid=6, compaction requested=false 2024-11-22T06:04:31,388 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.HRegion(2603): Flush status journal for 612f8bf9f13cacc84e04c86a0fadb058: 2024-11-22T06:04:31,388 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,1,1732255469729.612f8bf9f13cacc84e04c86a0fadb058. for snaptb0-testEmptyExportFileSystemState completed. 2024-11-22T06:04:31,389 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,1,1732255469729.612f8bf9f13cacc84e04c86a0fadb058.' 
region-info for snapshot=snaptb0-testEmptyExportFileSystemState 2024-11-22T06:04:31,389 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-22T06:04:31,389 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testEmptyExportFileSystemState/612f8bf9f13cacc84e04c86a0fadb058/cf/d72b25def2b94eb7b8973338fdb44bf8] hfiles 2024-11-22T06:04:31,389 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testEmptyExportFileSystemState/612f8bf9f13cacc84e04c86a0fadb058/cf/d72b25def2b94eb7b8973338fdb44bf8 for snapshot=snaptb0-testEmptyExportFileSystemState 2024-11-22T06:04:31,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742176_1352 (size=115) 2024-11-22T06:04:31,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742176_1352 (size=115) 2024-11-22T06:04:31,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742176_1352 (size=115) 2024-11-22T06:04:31,401 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,,1732255469729.dadc2a2cdf2ab46b32b7c1cab9201d3d. 
2024-11-22T06:04:31,401 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=166 2024-11-22T06:04:31,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.HMaster(4169): Remote procedure done, pid=166 2024-11-22T06:04:31,401 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testEmptyExportFileSystemState on region dadc2a2cdf2ab46b32b7c1cab9201d3d 2024-11-22T06:04:31,401 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=166, ppid=165, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure dadc2a2cdf2ab46b32b7c1cab9201d3d 2024-11-22T06:04:31,404 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=166, ppid=165, state=SUCCESS, hasLock=false; SnapshotRegionProcedure dadc2a2cdf2ab46b32b7c1cab9201d3d in 248 msec 2024-11-22T06:04:31,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742177_1353 (size=115) 2024-11-22T06:04:31,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742177_1353 (size=115) 2024-11-22T06:04:31,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742177_1353 (size=115) 2024-11-22T06:04:31,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=165 2024-11-22T06:04:31,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=165 2024-11-22T06:04:31,821 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,1,1732255469729.612f8bf9f13cacc84e04c86a0fadb058. 
2024-11-22T06:04:31,821 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=167 2024-11-22T06:04:31,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.HMaster(4169): Remote procedure done, pid=167 2024-11-22T06:04:31,822 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testEmptyExportFileSystemState on region 612f8bf9f13cacc84e04c86a0fadb058 2024-11-22T06:04:31,822 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=167, ppid=165, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 612f8bf9f13cacc84e04c86a0fadb058 2024-11-22T06:04:31,825 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=167, resume processing ppid=165 2024-11-22T06:04:31,825 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=167, ppid=165, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 612f8bf9f13cacc84e04c86a0fadb058 in 669 msec 2024-11-22T06:04:31,825 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-22T06:04:31,826 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-22T06:04:31,826 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-22T06:04:31,827 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testEmptyExportFileSystemState 2024-11-22T06:04:31,827 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/.tmp/snaptb0-testEmptyExportFileSystemState 2024-11-22T06:04:31,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742178_1354 (size=645) 2024-11-22T06:04:31,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742178_1354 (size=645) 2024-11-22T06:04:31,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742178_1354 (size=645) 2024-11-22T06:04:31,852 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute 
state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-22T06:04:31,865 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-22T06:04:31,867 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/.tmp/snaptb0-testEmptyExportFileSystemState to hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/snaptb0-testEmptyExportFileSystemState 2024-11-22T06:04:31,868 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-22T06:04:31,868 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 165 2024-11-22T06:04:31,870 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=165, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } in 745 msec 2024-11-22T06:04:32,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=165 2024-11-22T06:04:32,260 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-11-22T06:04:32,260 INFO [Time-limited test {}] snapshot.TestExportSnapshot(467): HDFS export destination path: hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/export-test/export-1732255472260 2024-11-22T06:04:32,260 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=hdfs://localhost:33847, tgtDir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/export-test/export-1732255472260, rawTgtDir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/export-test/export-1732255472260, srcFsUri=hdfs://localhost:33847, srcDir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788 2024-11-22T06:04:32,316 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:33847, inputRoot=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788 2024-11-22T06:04:32,316 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-2145612548_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/export-test/export-1732255472260, skipTmp=false, 
initialOutputSnapshotDir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/export-test/export-1732255472260/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-11-22T06:04:32,332 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 2024-11-22T06:04:32,348 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState to hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/export-test/export-1732255472260/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-11-22T06:04:32,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742179_1355 (size=185) 2024-11-22T06:04:32,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742179_1355 (size=185) 2024-11-22T06:04:32,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742179_1355 (size=185) 2024-11-22T06:04:32,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742180_1356 (size=567) 2024-11-22T06:04:32,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742180_1356 (size=567) 2024-11-22T06:04:32,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742180_1356 (size=567) 2024-11-22T06:04:32,403 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:04:32,403 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:04:32,403 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:04:33,602 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/hadoop-16528576225056328152.jar 2024-11-22T06:04:33,602 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:04:33,603 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:04:33,686 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/hadoop-3682643092617679015.jar 2024-11-22T06:04:33,686 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:04:33,686 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:04:33,687 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:04:33,687 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:04:33,688 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:04:33,688 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:04:33,688 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-22T06:04:33,688 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-22T06:04:33,689 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-22T06:04:33,689 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-22T06:04:33,689 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-22T06:04:33,689 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-22T06:04:33,690 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-22T06:04:33,690 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-22T06:04:33,690 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-22T06:04:33,690 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-22T06:04:33,691 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-22T06:04:33,691 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-22T06:04:33,691 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-22T06:04:33,692 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-22T06:04:33,692 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-22T06:04:33,692 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-22T06:04:33,692 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-22T06:04:33,693 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-22T06:04:33,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742181_1357 (size=131440) 2024-11-22T06:04:33,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742181_1357 (size=131440) 2024-11-22T06:04:33,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742181_1357 (size=131440) 2024-11-22T06:04:33,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742182_1358 (size=4188619) 2024-11-22T06:04:33,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742182_1358 (size=4188619) 2024-11-22T06:04:33,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742182_1358 (size=4188619) 2024-11-22T06:04:33,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742183_1359 (size=1323991) 2024-11-22T06:04:33,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742183_1359 (size=1323991) 2024-11-22T06:04:33,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742183_1359 (size=1323991) 2024-11-22T06:04:33,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742184_1360 (size=903736) 2024-11-22T06:04:33,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742184_1360 (size=903736) 2024-11-22T06:04:33,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742184_1360 (size=903736) 2024-11-22T06:04:34,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742185_1361 (size=8360083) 2024-11-22T06:04:34,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742185_1361 (size=8360083) 2024-11-22T06:04:34,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742185_1361 (size=8360083) 2024-11-22T06:04:34,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742186_1362 (size=1877034) 2024-11-22T06:04:34,096 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742186_1362 (size=1877034) 2024-11-22T06:04:34,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742186_1362 (size=1877034) 2024-11-22T06:04:34,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742187_1363 (size=77835) 2024-11-22T06:04:34,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742187_1363 (size=77835) 2024-11-22T06:04:34,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742187_1363 (size=77835) 2024-11-22T06:04:34,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742188_1364 (size=30949) 2024-11-22T06:04:34,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742188_1364 (size=30949) 2024-11-22T06:04:34,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742188_1364 (size=30949) 2024-11-22T06:04:34,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742189_1365 (size=1597347) 2024-11-22T06:04:34,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742189_1365 (size=1597347) 2024-11-22T06:04:34,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742189_1365 (size=1597347) 2024-11-22T06:04:34,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742190_1366 (size=4695811) 2024-11-22T06:04:34,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742190_1366 (size=4695811) 2024-11-22T06:04:34,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742190_1366 (size=4695811) 2024-11-22T06:04:34,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742191_1367 (size=232957) 2024-11-22T06:04:34,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742191_1367 (size=232957) 2024-11-22T06:04:34,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742191_1367 (size=232957) 2024-11-22T06:04:34,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742192_1368 (size=127628) 2024-11-22T06:04:34,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742192_1368 (size=127628) 2024-11-22T06:04:34,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742192_1368 (size=127628) 2024-11-22T06:04:34,255 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742193_1369 (size=20406) 2024-11-22T06:04:34,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742193_1369 (size=20406) 2024-11-22T06:04:34,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742193_1369 (size=20406) 2024-11-22T06:04:34,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742194_1370 (size=6424744) 2024-11-22T06:04:34,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742194_1370 (size=6424744) 2024-11-22T06:04:34,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742194_1370 (size=6424744) 2024-11-22T06:04:34,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742195_1371 (size=5175431) 2024-11-22T06:04:34,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742195_1371 (size=5175431) 2024-11-22T06:04:34,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742195_1371 (size=5175431) 2024-11-22T06:04:34,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742196_1372 (size=217634) 2024-11-22T06:04:34,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742196_1372 (size=217634) 2024-11-22T06:04:34,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742196_1372 (size=217634) 2024-11-22T06:04:34,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742197_1373 (size=1832290) 2024-11-22T06:04:34,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742197_1373 (size=1832290) 2024-11-22T06:04:34,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742197_1373 (size=1832290) 2024-11-22T06:04:34,624 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-22T06:04:34,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742198_1374 (size=322274) 2024-11-22T06:04:34,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742198_1374 (size=322274) 2024-11-22T06:04:34,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742198_1374 (size=322274) 2024-11-22T06:04:34,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742199_1375 (size=503880) 2024-11-22T06:04:34,764 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742199_1375 (size=503880) 2024-11-22T06:04:34,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742199_1375 (size=503880) 2024-11-22T06:04:34,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742200_1376 (size=29229) 2024-11-22T06:04:34,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742200_1376 (size=29229) 2024-11-22T06:04:34,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742200_1376 (size=29229) 2024-11-22T06:04:34,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742201_1377 (size=24096) 2024-11-22T06:04:34,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742201_1377 (size=24096) 2024-11-22T06:04:34,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742201_1377 (size=24096) 2024-11-22T06:04:34,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742202_1378 (size=440656) 2024-11-22T06:04:34,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742202_1378 (size=440656) 2024-11-22T06:04:34,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742202_1378 (size=440656) 2024-11-22T06:04:34,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742203_1379 (size=111872) 2024-11-22T06:04:34,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742203_1379 (size=111872) 2024-11-22T06:04:34,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742203_1379 (size=111872) 2024-11-22T06:04:34,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742204_1380 (size=45609) 2024-11-22T06:04:34,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742204_1380 (size=45609) 2024-11-22T06:04:34,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742204_1380 (size=45609) 2024-11-22T06:04:34,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742205_1381 (size=136454) 2024-11-22T06:04:34,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742205_1381 (size=136454) 2024-11-22T06:04:34,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742205_1381 (size=136454) 2024-11-22T06:04:34,846 
WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-11-22T06:04:34,848 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'emptySnaptb0-testEmptyExportFileSystemState' hfile list 2024-11-22T06:04:34,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742206_1382 (size=7) 2024-11-22T06:04:34,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742206_1382 (size=7) 2024-11-22T06:04:34,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742206_1382 (size=7) 2024-11-22T06:04:34,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742207_1383 (size=10) 2024-11-22T06:04:34,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742207_1383 (size=10) 2024-11-22T06:04:34,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742207_1383 (size=10) 2024-11-22T06:04:34,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742208_1384 (size=303637) 2024-11-22T06:04:34,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742208_1384 (size=303637) 2024-11-22T06:04:34,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742208_1384 (size=303637) 2024-11-22T06:04:34,903 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-22T06:04:34,903 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-22T06:04:35,570 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732255328437_0007_000001 (auth:SIMPLE) from 127.0.0.1:47858 2024-11-22T06:04:40,882 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732255328437_0007_000001 (auth:SIMPLE) from 127.0.0.1:43052 2024-11-22T06:04:41,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742209_1385 (size=349263) 2024-11-22T06:04:41,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742209_1385 (size=349263) 2024-11-22T06:04:41,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742209_1385 (size=349263) 2024-11-22T06:04:41,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742210_1386 (size=8568) 2024-11-22T06:04:41,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742210_1386 (size=8568) 2024-11-22T06:04:41,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742210_1386 (size=8568) 2024-11-22T06:04:42,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742211_1387 (size=460) 2024-11-22T06:04:42,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742211_1387 (size=460) 2024-11-22T06:04:42,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742211_1387 (size=460) 2024-11-22T06:04:42,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742212_1388 (size=8568) 2024-11-22T06:04:42,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742212_1388 (size=8568) 2024-11-22T06:04:42,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742212_1388 (size=8568) 2024-11-22T06:04:42,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742213_1389 (size=349263) 2024-11-22T06:04:42,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742213_1389 (size=349263) 2024-11-22T06:04:42,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742213_1389 (size=349263) 2024-11-22T06:04:44,033 INFO [Time-limited test {}] snapshot.ExportSnapshot(1210): Finalize the Snapshot Export 2024-11-22T06:04:44,034 INFO [Time-limited test {}] snapshot.ExportSnapshot(1221): Verify the exported snapshot's expiration status and integrity. 
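
The export that just ran above (ExportSnapshot loading the 'emptySnaptb0-testEmptyExportFileSystemState' hfile list, submitting a MapReduce job, then finalizing and verifying the result) is normally driven from the command line or as a Hadoop Tool. A rough sketch only, not the test's actual invocation; the target URI below is a placeholder:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Roughly equivalent to the CLI form:
        //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
        //     --snapshot emptySnaptb0-testEmptyExportFileSystemState --copy-to hdfs://target-cluster/hbase
        // "hdfs://target-cluster/hbase" is a placeholder, not the path used in this run.
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "--snapshot", "emptySnaptb0-testEmptyExportFileSystemState",
            "--copy-to", "hdfs://target-cluster/hbase"});
        System.exit(rc);
      }
    }
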
2024-11-22T06:04:44,054 INFO [Time-limited test {}] snapshot.ExportSnapshot(1227): Export Completed: emptySnaptb0-testEmptyExportFileSystemState 2024-11-22T06:04:44,054 INFO [Time-limited test {}] snapshot.TestExportSnapshot(361): Exported snapshot 2024-11-22T06:04:44,055 INFO [Time-limited test {}] snapshot.TestExportSnapshot(372): Verified filesystem state 2024-11-22T06:04:44,055 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-2145612548_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState at hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-11-22T06:04:44,056 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/.snapshotinfo 2024-11-22T06:04:44,056 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/data.manifest 2024-11-22T06:04:44,056 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-2145612548_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/export-test/export-1732255472260/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState at hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/export-test/export-1732255472260/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-11-22T06:04:44,056 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/export-test/export-1732255472260/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/.snapshotinfo 2024-11-22T06:04:44,056 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/export-test/export-1732255472260/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/data.manifest 2024-11-22T06:04:44,063 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testEmptyExportFileSystemState 2024-11-22T06:04:44,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] procedure2.ProcedureExecutor(1139): Stored pid=168, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-22T06:04:44,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=168 2024-11-22T06:04:44,069 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732255484069"}]},"ts":"1732255484069"} 2024-11-22T06:04:44,072 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=DISABLING in hbase:meta 2024-11-22T06:04:44,072 INFO [PEWorker-3 {}] 
procedure.DisableTableProcedure(284): Set testtb-testEmptyExportFileSystemState to state=DISABLING 2024-11-22T06:04:44,073 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=169, ppid=168, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testEmptyExportFileSystemState}] 2024-11-22T06:04:44,076 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=dadc2a2cdf2ab46b32b7c1cab9201d3d, UNASSIGN}, {pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=612f8bf9f13cacc84e04c86a0fadb058, UNASSIGN}] 2024-11-22T06:04:44,079 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=612f8bf9f13cacc84e04c86a0fadb058, UNASSIGN 2024-11-22T06:04:44,079 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=dadc2a2cdf2ab46b32b7c1cab9201d3d, UNASSIGN 2024-11-22T06:04:44,080 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=171 updating hbase:meta row=612f8bf9f13cacc84e04c86a0fadb058, regionState=CLOSING, regionLocation=407411ea95db,35283,1732255321784 2024-11-22T06:04:44,081 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=170 updating hbase:meta row=dadc2a2cdf2ab46b32b7c1cab9201d3d, regionState=CLOSING, regionLocation=407411ea95db,44619,1732255321573 2024-11-22T06:04:44,085 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=612f8bf9f13cacc84e04c86a0fadb058, UNASSIGN because future has completed 2024-11-22T06:04:44,086 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-22T06:04:44,086 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=172, ppid=171, state=RUNNABLE, hasLock=false; CloseRegionProcedure 612f8bf9f13cacc84e04c86a0fadb058, server=407411ea95db,35283,1732255321784}] 2024-11-22T06:04:44,086 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=dadc2a2cdf2ab46b32b7c1cab9201d3d, UNASSIGN because future has completed 2024-11-22T06:04:44,088 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-22T06:04:44,088 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=173, ppid=170, state=RUNNABLE, hasLock=false; CloseRegionProcedure dadc2a2cdf2ab46b32b7c1cab9201d3d, server=407411ea95db,44619,1732255321573}] 2024-11-22T06:04:44,180 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=168 2024-11-22T06:04:44,240 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] handler.UnassignRegionHandler(122): Close 612f8bf9f13cacc84e04c86a0fadb058 2024-11-22T06:04:44,240 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-22T06:04:44,240 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1722): Closing 612f8bf9f13cacc84e04c86a0fadb058, disabling compactions & flushes 2024-11-22T06:04:44,240 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,1,1732255469729.612f8bf9f13cacc84e04c86a0fadb058. 2024-11-22T06:04:44,240 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,1,1732255469729.612f8bf9f13cacc84e04c86a0fadb058. 2024-11-22T06:04:44,240 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,1,1732255469729.612f8bf9f13cacc84e04c86a0fadb058. after waiting 0 ms 2024-11-22T06:04:44,240 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,1,1732255469729.612f8bf9f13cacc84e04c86a0fadb058. 2024-11-22T06:04:44,241 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] handler.UnassignRegionHandler(122): Close dadc2a2cdf2ab46b32b7c1cab9201d3d 2024-11-22T06:04:44,241 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-22T06:04:44,242 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] regionserver.HRegion(1722): Closing dadc2a2cdf2ab46b32b7c1cab9201d3d, disabling compactions & flushes 2024-11-22T06:04:44,242 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,,1732255469729.dadc2a2cdf2ab46b32b7c1cab9201d3d. 2024-11-22T06:04:44,242 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,,1732255469729.dadc2a2cdf2ab46b32b7c1cab9201d3d. 2024-11-22T06:04:44,242 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,,1732255469729.dadc2a2cdf2ab46b32b7c1cab9201d3d. after waiting 0 ms 2024-11-22T06:04:44,242 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,,1732255469729.dadc2a2cdf2ab46b32b7c1cab9201d3d. 
2024-11-22T06:04:44,261 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testEmptyExportFileSystemState/612f8bf9f13cacc84e04c86a0fadb058/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-22T06:04:44,266 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-22T06:04:44,266 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,1,1732255469729.612f8bf9f13cacc84e04c86a0fadb058. 2024-11-22T06:04:44,266 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1676): Region close journal for 612f8bf9f13cacc84e04c86a0fadb058: Waiting for close lock at 1732255484240Running coprocessor pre-close hooks at 1732255484240Disabling compacts and flushes for region at 1732255484240Disabling writes for close at 1732255484240Writing region close event to WAL at 1732255484243 (+3 ms)Running coprocessor post-close hooks at 1732255484266 (+23 ms)Closed at 1732255484266 2024-11-22T06:04:44,269 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] handler.UnassignRegionHandler(157): Closed 612f8bf9f13cacc84e04c86a0fadb058 2024-11-22T06:04:44,270 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=171 updating hbase:meta row=612f8bf9f13cacc84e04c86a0fadb058, regionState=CLOSED 2024-11-22T06:04:44,273 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=172, ppid=171, state=RUNNABLE, hasLock=false; CloseRegionProcedure 612f8bf9f13cacc84e04c86a0fadb058, server=407411ea95db,35283,1732255321784 because future has completed 2024-11-22T06:04:44,278 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=172, resume processing ppid=171 2024-11-22T06:04:44,278 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=172, ppid=171, state=SUCCESS, hasLock=false; CloseRegionProcedure 612f8bf9f13cacc84e04c86a0fadb058, server=407411ea95db,35283,1732255321784 in 189 msec 2024-11-22T06:04:44,279 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testEmptyExportFileSystemState/dadc2a2cdf2ab46b32b7c1cab9201d3d/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-22T06:04:44,280 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-22T06:04:44,280 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,,1732255469729.dadc2a2cdf2ab46b32b7c1cab9201d3d. 
2024-11-22T06:04:44,280 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] regionserver.HRegion(1676): Region close journal for dadc2a2cdf2ab46b32b7c1cab9201d3d: Waiting for close lock at 1732255484241Running coprocessor pre-close hooks at 1732255484241Disabling compacts and flushes for region at 1732255484241Disabling writes for close at 1732255484242 (+1 ms)Writing region close event to WAL at 1732255484244 (+2 ms)Running coprocessor post-close hooks at 1732255484280 (+36 ms)Closed at 1732255484280 2024-11-22T06:04:44,281 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=171, ppid=169, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=612f8bf9f13cacc84e04c86a0fadb058, UNASSIGN in 202 msec 2024-11-22T06:04:44,282 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] handler.UnassignRegionHandler(157): Closed dadc2a2cdf2ab46b32b7c1cab9201d3d 2024-11-22T06:04:44,283 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=170 updating hbase:meta row=dadc2a2cdf2ab46b32b7c1cab9201d3d, regionState=CLOSED 2024-11-22T06:04:44,285 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=173, ppid=170, state=RUNNABLE, hasLock=false; CloseRegionProcedure dadc2a2cdf2ab46b32b7c1cab9201d3d, server=407411ea95db,44619,1732255321573 because future has completed 2024-11-22T06:04:44,289 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=173, resume processing ppid=170 2024-11-22T06:04:44,289 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=173, ppid=170, state=SUCCESS, hasLock=false; CloseRegionProcedure dadc2a2cdf2ab46b32b7c1cab9201d3d, server=407411ea95db,44619,1732255321573 in 199 msec 2024-11-22T06:04:44,294 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=170, resume processing ppid=169 2024-11-22T06:04:44,294 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=170, ppid=169, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=dadc2a2cdf2ab46b32b7c1cab9201d3d, UNASSIGN in 213 msec 2024-11-22T06:04:44,298 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=169, resume processing ppid=168 2024-11-22T06:04:44,298 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=169, ppid=168, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testEmptyExportFileSystemState in 222 msec 2024-11-22T06:04:44,300 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732255484299"}]},"ts":"1732255484299"} 2024-11-22T06:04:44,302 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=DISABLED in hbase:meta 2024-11-22T06:04:44,302 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testEmptyExportFileSystemState to state=DISABLED 2024-11-22T06:04:44,305 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=168, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testEmptyExportFileSystemState in 240 msec 2024-11-22T06:04:44,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): 
Checking to see if procedure is done pid=168 2024-11-22T06:04:44,389 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-11-22T06:04:44,390 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testEmptyExportFileSystemState 2024-11-22T06:04:44,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] procedure2.ProcedureExecutor(1139): Stored pid=174, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-22T06:04:44,394 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=174, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-22T06:04:44,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testEmptyExportFileSystemState 2024-11-22T06:04:44,398 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=174, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-22T06:04:44,399 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41799 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testEmptyExportFileSystemState 2024-11-22T06:04:44,403 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testEmptyExportFileSystemState/612f8bf9f13cacc84e04c86a0fadb058 2024-11-22T06:04:44,403 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testEmptyExportFileSystemState/dadc2a2cdf2ab46b32b7c1cab9201d3d 2024-11-22T06:04:44,406 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testEmptyExportFileSystemState/612f8bf9f13cacc84e04c86a0fadb058/cf, FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testEmptyExportFileSystemState/612f8bf9f13cacc84e04c86a0fadb058/recovered.edits] 2024-11-22T06:04:44,409 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testEmptyExportFileSystemState/dadc2a2cdf2ab46b32b7c1cab9201d3d/cf, FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testEmptyExportFileSystemState/dadc2a2cdf2ab46b32b7c1cab9201d3d/recovered.edits] 2024-11-22T06:04:44,411 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testEmptyExportFileSystemState/612f8bf9f13cacc84e04c86a0fadb058/cf/d72b25def2b94eb7b8973338fdb44bf8 to 
hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/archive/data/default/testtb-testEmptyExportFileSystemState/612f8bf9f13cacc84e04c86a0fadb058/cf/d72b25def2b94eb7b8973338fdb44bf8 2024-11-22T06:04:44,414 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testEmptyExportFileSystemState/dadc2a2cdf2ab46b32b7c1cab9201d3d/cf/eb55e17987c344598e82eead86bc5037 to hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/archive/data/default/testtb-testEmptyExportFileSystemState/dadc2a2cdf2ab46b32b7c1cab9201d3d/cf/eb55e17987c344598e82eead86bc5037 2024-11-22T06:04:44,414 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testEmptyExportFileSystemState/612f8bf9f13cacc84e04c86a0fadb058/recovered.edits/9.seqid to hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/archive/data/default/testtb-testEmptyExportFileSystemState/612f8bf9f13cacc84e04c86a0fadb058/recovered.edits/9.seqid 2024-11-22T06:04:44,415 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testEmptyExportFileSystemState/612f8bf9f13cacc84e04c86a0fadb058 2024-11-22T06:04:44,417 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testEmptyExportFileSystemState/dadc2a2cdf2ab46b32b7c1cab9201d3d/recovered.edits/9.seqid to hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/archive/data/default/testtb-testEmptyExportFileSystemState/dadc2a2cdf2ab46b32b7c1cab9201d3d/recovered.edits/9.seqid 2024-11-22T06:04:44,418 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testEmptyExportFileSystemState/dadc2a2cdf2ab46b32b7c1cab9201d3d 2024-11-22T06:04:44,418 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testEmptyExportFileSystemState regions 2024-11-22T06:04:44,421 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=174, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-22T06:04:44,424 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testEmptyExportFileSystemState from hbase:meta 2024-11-22T06:04:44,458 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-22T06:04:44,458 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44619-0x101626c19a90001, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-22T06:04:44,458 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41799-0x101626c19a90002, quorum=127.0.0.1:59716, baseZNode=/hbase 
Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-22T06:04:44,458 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35283-0x101626c19a90003, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-22T06:04:44,459 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-11-22T06:04:44,459 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-11-22T06:04:44,459 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-11-22T06:04:44,459 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-11-22T06:04:44,460 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testEmptyExportFileSystemState' descriptor. 2024-11-22T06:04:44,462 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=174, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-22T06:04:44,462 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testEmptyExportFileSystemState' from region states. 2024-11-22T06:04:44,462 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState,,1732255469729.dadc2a2cdf2ab46b32b7c1cab9201d3d.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732255484462"}]},"ts":"9223372036854775807"} 2024-11-22T06:04:44,462 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState,1,1732255469729.612f8bf9f13cacc84e04c86a0fadb058.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732255484462"}]},"ts":"9223372036854775807"} 2024-11-22T06:04:44,465 INFO [PEWorker-4 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-22T06:04:44,465 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => dadc2a2cdf2ab46b32b7c1cab9201d3d, NAME => 'testtb-testEmptyExportFileSystemState,,1732255469729.dadc2a2cdf2ab46b32b7c1cab9201d3d.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 612f8bf9f13cacc84e04c86a0fadb058, NAME => 'testtb-testEmptyExportFileSystemState,1,1732255469729.612f8bf9f13cacc84e04c86a0fadb058.', STARTKEY => '1', ENDKEY => ''}] 2024-11-22T06:04:44,465 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testEmptyExportFileSystemState' as deleted. 
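[editor's note] The records above trace the client-side teardown of testtb-testEmptyExportFileSystemState: the table is disabled, a DeleteTableProcedure (pid=174) archives the region directories and removes the region rows from hbase:meta, and a few records further down the two snapshots taken for this table are dropped as well. A minimal sketch of Admin calls that would drive this sequence is shown below; the Connection setup and the class name are illustrative assumptions, not taken from the log, and the actual test uses its own test utilities.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropEmptyExportTestTable {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("testtb-testEmptyExportFileSystemState");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      if (admin.isTableEnabled(table)) {
        admin.disableTable(table);   // "Operation: DISABLE ... completed" record above
      }
      admin.deleteTable(table);      // drives the DeleteTableProcedure (pid=174)
      // Snapshot cleanup, matching the "Deleting snapshot" records further down.
      admin.deleteSnapshot("emptySnaptb0-testEmptyExportFileSystemState");
      admin.deleteSnapshot("snaptb0-testEmptyExportFileSystemState");
    }
  }
}
```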
2024-11-22T06:04:44,465 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732255484465"}]},"ts":"9223372036854775807"} 2024-11-22T06:04:44,466 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-22T06:04:44,466 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41799-0x101626c19a90002, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-22T06:04:44,466 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35283-0x101626c19a90003, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-22T06:04:44,466 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41799-0x101626c19a90002, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:04:44,466 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:04:44,466 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35283-0x101626c19a90003, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:04:44,467 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44619-0x101626c19a90001, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-22T06:04:44,467 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44619-0x101626c19a90001, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:04:44,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=174 2024-11-22T06:04:44,468 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-22T06:04:44,468 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-22T06:04:44,469 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-22T06:04:44,469 DEBUG [zk-permission-watcher-pool-0 {}] 
access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-22T06:04:44,470 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testEmptyExportFileSystemState state from META 2024-11-22T06:04:44,471 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=174, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-22T06:04:44,472 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=174, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState in 81 msec 2024-11-22T06:04:44,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=174 2024-11-22T06:04:44,571 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testEmptyExportFileSystemState 2024-11-22T06:04:44,571 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-11-22T06:04:44,579 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testEmptyExportFileSystemState" type: DISABLED 2024-11-22T06:04:44,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testEmptyExportFileSystemState 2024-11-22T06:04:44,584 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testEmptyExportFileSystemState" type: DISABLED 2024-11-22T06:04:44,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testEmptyExportFileSystemState 2024-11-22T06:04:44,614 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testEmptyExportFileSystemState Thread=808 (was 798) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1680381719_1 at /127.0.0.1:54842 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) 
app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-5307 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: IPC Client (1113730973) connection to localhost/127.0.0.1:37203 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: process reaper (pid 129820) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37203 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1113730973) 
connection to localhost/127.0.0.1:33903 from appattempt_1732255328437_0007_000001 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HFileArchiver-18 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2145612548_22 at /127.0.0.1:36680 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1680381719_1 at /127.0.0.1:41322 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) 
app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2145612548_22 at /127.0.0.1:54866 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2145612548_22 at /127.0.0.1:45830 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=813 (was 793) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=539 (was 489) - SystemLoadAverage LEAK? -, ProcessCount=17 (was 14) - ProcessCount LEAK? 
-, AvailableMemoryMB=4913 (was 5651) 2024-11-22T06:04:44,614 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=808 is superior to 500 2024-11-22T06:04:44,639 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportWithChecksum Thread=808, OpenFileDescriptor=813, MaxFileDescriptor=1048576, SystemLoadAverage=539, ProcessCount=17, AvailableMemoryMB=4911 2024-11-22T06:04:44,639 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=808 is superior to 500 2024-11-22T06:04:44,643 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T06:04:44,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] procedure2.ProcedureExecutor(1139): Stored pid=175, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithChecksum 2024-11-22T06:04:44,649 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=175, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_PRE_OPERATION 2024-11-22T06:04:44,649 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T06:04:44,649 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithChecksum" procId is: 175 2024-11-22T06:04:44,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=175 2024-11-22T06:04:44,651 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=175, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-22T06:04:44,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742214_1390 (size=404) 2024-11-22T06:04:44,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742214_1390 (size=404) 2024-11-22T06:04:44,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742214_1390 (size=404) 2024-11-22T06:04:44,673 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => ad39bbcb25203ccdc96629b51aad1e82, NAME => 'testtb-testExportWithChecksum,,1732255484642.ad39bbcb25203ccdc96629b51aad1e82.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788 2024-11-22T06:04:44,675 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => d0d7aed9add9567f9b2dc3ac93141bf0, NAME => 'testtb-testExportWithChecksum,1,1732255484642.d0d7aed9add9567f9b2dc3ac93141bf0.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788 2024-11-22T06:04:44,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742215_1391 (size=65) 2024-11-22T06:04:44,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742215_1391 (size=65) 2024-11-22T06:04:44,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742215_1391 (size=65) 2024-11-22T06:04:44,688 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,,1732255484642.ad39bbcb25203ccdc96629b51aad1e82.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T06:04:44,689 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1722): Closing ad39bbcb25203ccdc96629b51aad1e82, disabling compactions & flushes 2024-11-22T06:04:44,689 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,,1732255484642.ad39bbcb25203ccdc96629b51aad1e82. 2024-11-22T06:04:44,689 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,,1732255484642.ad39bbcb25203ccdc96629b51aad1e82. 2024-11-22T06:04:44,689 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,,1732255484642.ad39bbcb25203ccdc96629b51aad1e82. after waiting 0 ms 2024-11-22T06:04:44,689 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,,1732255484642.ad39bbcb25203ccdc96629b51aad1e82. 2024-11-22T06:04:44,689 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,,1732255484642.ad39bbcb25203ccdc96629b51aad1e82. 
2024-11-22T06:04:44,689 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1676): Region close journal for ad39bbcb25203ccdc96629b51aad1e82: Waiting for close lock at 1732255484689Disabling compacts and flushes for region at 1732255484689Disabling writes for close at 1732255484689Writing region close event to WAL at 1732255484689Closed at 1732255484689 2024-11-22T06:04:44,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742216_1392 (size=65) 2024-11-22T06:04:44,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742216_1392 (size=65) 2024-11-22T06:04:44,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742216_1392 (size=65) 2024-11-22T06:04:44,707 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,1,1732255484642.d0d7aed9add9567f9b2dc3ac93141bf0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T06:04:44,707 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1722): Closing d0d7aed9add9567f9b2dc3ac93141bf0, disabling compactions & flushes 2024-11-22T06:04:44,707 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,1,1732255484642.d0d7aed9add9567f9b2dc3ac93141bf0. 2024-11-22T06:04:44,708 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,1,1732255484642.d0d7aed9add9567f9b2dc3ac93141bf0. 2024-11-22T06:04:44,708 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,1,1732255484642.d0d7aed9add9567f9b2dc3ac93141bf0. after waiting 0 ms 2024-11-22T06:04:44,708 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,1,1732255484642.d0d7aed9add9567f9b2dc3ac93141bf0. 2024-11-22T06:04:44,708 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,1,1732255484642.d0d7aed9add9567f9b2dc3ac93141bf0. 
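[editor's note] The CREATE request logged at 06:04:44,643 carries the full table descriptor: one column family 'cf' with a single version and default encoding/compression, REGION_REPLICATION => '1', and a split at row key '1', which is why RegionOpenAndInit initializes the two regions (''..'1' and '1'..'') seen above. A minimal sketch of an equivalent client-side create follows; the Connection setup and class name are assumptions, not taken from the log.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateChecksumTestTable {
  public static void main(String[] args) throws Exception {
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("testtb-testExportWithChecksum"))
        .setRegionReplication(1)                 // TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}
        .setColumnFamily(ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("cf"))
            .setMaxVersions(1)                   // VERSIONS => '1'
            .build())
        .build();
    byte[][] splitKeys = { Bytes.toBytes("1") }; // yields regions ''..'1' and '1'..''
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.createTable(td, splitKeys);          // stored as CreateTableProcedure pid=175 above
    }
  }
}
```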
2024-11-22T06:04:44,708 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1676): Region close journal for d0d7aed9add9567f9b2dc3ac93141bf0: Waiting for close lock at 1732255484707Disabling compacts and flushes for region at 1732255484707Disabling writes for close at 1732255484708 (+1 ms)Writing region close event to WAL at 1732255484708Closed at 1732255484708 2024-11-22T06:04:44,709 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=175, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_ADD_TO_META 2024-11-22T06:04:44,709 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithChecksum,,1732255484642.ad39bbcb25203ccdc96629b51aad1e82.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1732255484709"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732255484709"}]},"ts":"1732255484709"} 2024-11-22T06:04:44,709 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithChecksum,1,1732255484642.d0d7aed9add9567f9b2dc3ac93141bf0.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1732255484709"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732255484709"}]},"ts":"1732255484709"} 2024-11-22T06:04:44,711 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-22T06:04:44,712 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=175, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-22T06:04:44,712 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732255484712"}]},"ts":"1732255484712"} 2024-11-22T06:04:44,714 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=ENABLING in hbase:meta 2024-11-22T06:04:44,714 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(204): Hosts are {407411ea95db=0} racks are {/default-rack=0} 2024-11-22T06:04:44,716 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-22T06:04:44,716 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-22T06:04:44,716 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-22T06:04:44,716 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-22T06:04:44,716 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-22T06:04:44,716 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-22T06:04:44,716 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-22T06:04:44,716 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-22T06:04:44,716 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-22T06:04:44,716 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-22T06:04:44,716 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=176, ppid=175, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=ad39bbcb25203ccdc96629b51aad1e82, ASSIGN}, {pid=177, ppid=175, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=d0d7aed9add9567f9b2dc3ac93141bf0, ASSIGN}] 2024-11-22T06:04:44,718 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=176, ppid=175, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=ad39bbcb25203ccdc96629b51aad1e82, ASSIGN 2024-11-22T06:04:44,718 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=177, ppid=175, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=d0d7aed9add9567f9b2dc3ac93141bf0, ASSIGN 2024-11-22T06:04:44,719 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=177, ppid=175, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=d0d7aed9add9567f9b2dc3ac93141bf0, ASSIGN; state=OFFLINE, location=407411ea95db,44619,1732255321573; forceNewPlan=false, retain=false 2024-11-22T06:04:44,719 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=176, ppid=175, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=ad39bbcb25203ccdc96629b51aad1e82, ASSIGN; state=OFFLINE, location=407411ea95db,41799,1732255321718; forceNewPlan=false, retain=false 2024-11-22T06:04:44,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=175 2024-11-22T06:04:44,869 INFO [407411ea95db:40429 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-11-22T06:04:44,869 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=176 updating hbase:meta row=ad39bbcb25203ccdc96629b51aad1e82, regionState=OPENING, regionLocation=407411ea95db,41799,1732255321718 2024-11-22T06:04:44,869 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=177 updating hbase:meta row=d0d7aed9add9567f9b2dc3ac93141bf0, regionState=OPENING, regionLocation=407411ea95db,44619,1732255321573 2024-11-22T06:04:44,872 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=177, ppid=175, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=d0d7aed9add9567f9b2dc3ac93141bf0, ASSIGN because future has completed 2024-11-22T06:04:44,872 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=178, ppid=177, state=RUNNABLE, hasLock=false; OpenRegionProcedure d0d7aed9add9567f9b2dc3ac93141bf0, server=407411ea95db,44619,1732255321573}] 2024-11-22T06:04:44,873 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=176, ppid=175, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=ad39bbcb25203ccdc96629b51aad1e82, ASSIGN because future has completed 2024-11-22T06:04:44,874 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=179, ppid=176, state=RUNNABLE, hasLock=false; OpenRegionProcedure ad39bbcb25203ccdc96629b51aad1e82, server=407411ea95db,41799,1732255321718}] 2024-11-22T06:04:44,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=175 2024-11-22T06:04:45,028 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] handler.AssignRegionHandler(132): Open testtb-testExportWithChecksum,1,1732255484642.d0d7aed9add9567f9b2dc3ac93141bf0. 2024-11-22T06:04:45,028 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(7752): Opening region: {ENCODED => d0d7aed9add9567f9b2dc3ac93141bf0, NAME => 'testtb-testExportWithChecksum,1,1732255484642.d0d7aed9add9567f9b2dc3ac93141bf0.', STARTKEY => '1', ENDKEY => ''} 2024-11-22T06:04:45,029 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithChecksum,1,1732255484642.d0d7aed9add9567f9b2dc3ac93141bf0. service=AccessControlService 2024-11-22T06:04:45,030 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-22T06:04:45,030 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum d0d7aed9add9567f9b2dc3ac93141bf0 2024-11-22T06:04:45,030 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,1,1732255484642.d0d7aed9add9567f9b2dc3ac93141bf0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T06:04:45,030 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(7794): checking encryption for d0d7aed9add9567f9b2dc3ac93141bf0 2024-11-22T06:04:45,030 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(7797): checking classloading for d0d7aed9add9567f9b2dc3ac93141bf0 2024-11-22T06:04:45,032 INFO [StoreOpener-d0d7aed9add9567f9b2dc3ac93141bf0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region d0d7aed9add9567f9b2dc3ac93141bf0 2024-11-22T06:04:45,033 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] handler.AssignRegionHandler(132): Open testtb-testExportWithChecksum,,1732255484642.ad39bbcb25203ccdc96629b51aad1e82. 2024-11-22T06:04:45,033 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(7752): Opening region: {ENCODED => ad39bbcb25203ccdc96629b51aad1e82, NAME => 'testtb-testExportWithChecksum,,1732255484642.ad39bbcb25203ccdc96629b51aad1e82.', STARTKEY => '', ENDKEY => '1'} 2024-11-22T06:04:45,033 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithChecksum,,1732255484642.ad39bbcb25203ccdc96629b51aad1e82. service=AccessControlService 2024-11-22T06:04:45,033 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-22T06:04:45,034 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum ad39bbcb25203ccdc96629b51aad1e82 2024-11-22T06:04:45,034 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,,1732255484642.ad39bbcb25203ccdc96629b51aad1e82.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T06:04:45,034 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(7794): checking encryption for ad39bbcb25203ccdc96629b51aad1e82 2024-11-22T06:04:45,034 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(7797): checking classloading for ad39bbcb25203ccdc96629b51aad1e82 2024-11-22T06:04:45,035 INFO [StoreOpener-d0d7aed9add9567f9b2dc3ac93141bf0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d0d7aed9add9567f9b2dc3ac93141bf0 columnFamilyName cf 2024-11-22T06:04:45,036 DEBUG [StoreOpener-d0d7aed9add9567f9b2dc3ac93141bf0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T06:04:45,036 INFO [StoreOpener-d0d7aed9add9567f9b2dc3ac93141bf0-1 {}] regionserver.HStore(327): Store=d0d7aed9add9567f9b2dc3ac93141bf0/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T06:04:45,036 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(1038): replaying wal for d0d7aed9add9567f9b2dc3ac93141bf0 2024-11-22T06:04:45,037 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithChecksum/d0d7aed9add9567f9b2dc3ac93141bf0 2024-11-22T06:04:45,037 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithChecksum/d0d7aed9add9567f9b2dc3ac93141bf0 2024-11-22T06:04:45,038 INFO [StoreOpener-ad39bbcb25203ccdc96629b51aad1e82-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family 
cf of region ad39bbcb25203ccdc96629b51aad1e82 2024-11-22T06:04:45,038 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(1048): stopping wal replay for d0d7aed9add9567f9b2dc3ac93141bf0 2024-11-22T06:04:45,038 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(1060): Cleaning up temporary data for d0d7aed9add9567f9b2dc3ac93141bf0 2024-11-22T06:04:45,039 INFO [StoreOpener-ad39bbcb25203ccdc96629b51aad1e82-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ad39bbcb25203ccdc96629b51aad1e82 columnFamilyName cf 2024-11-22T06:04:45,039 DEBUG [StoreOpener-ad39bbcb25203ccdc96629b51aad1e82-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T06:04:45,040 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(1093): writing seq id for d0d7aed9add9567f9b2dc3ac93141bf0 2024-11-22T06:04:45,040 INFO [StoreOpener-ad39bbcb25203ccdc96629b51aad1e82-1 {}] regionserver.HStore(327): Store=ad39bbcb25203ccdc96629b51aad1e82/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T06:04:45,040 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(1038): replaying wal for ad39bbcb25203ccdc96629b51aad1e82 2024-11-22T06:04:45,042 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithChecksum/ad39bbcb25203ccdc96629b51aad1e82 2024-11-22T06:04:45,044 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithChecksum/d0d7aed9add9567f9b2dc3ac93141bf0/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T06:04:45,045 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(1114): Opened d0d7aed9add9567f9b2dc3ac93141bf0; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71682059, jitterRate=0.06814591586589813}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-22T06:04:45,045 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(1122): Running coprocessor post-open hooks for d0d7aed9add9567f9b2dc3ac93141bf0 2024-11-22T06:04:45,045 DEBUG 
[RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(1006): Region open journal for d0d7aed9add9567f9b2dc3ac93141bf0: Running coprocessor pre-open hook at 1732255485030Writing region info on filesystem at 1732255485030Initializing all the Stores at 1732255485031 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732255485031Cleaning up temporary data from old regions at 1732255485038 (+7 ms)Running coprocessor post-open hooks at 1732255485045 (+7 ms)Region opened successfully at 1732255485045 2024-11-22T06:04:45,046 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithChecksum,1,1732255484642.d0d7aed9add9567f9b2dc3ac93141bf0., pid=178, masterSystemTime=1732255485025 2024-11-22T06:04:45,050 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=177 updating hbase:meta row=d0d7aed9add9567f9b2dc3ac93141bf0, regionState=OPEN, openSeqNum=2, regionLocation=407411ea95db,44619,1732255321573 2024-11-22T06:04:45,053 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=178, ppid=177, state=RUNNABLE, hasLock=false; OpenRegionProcedure d0d7aed9add9567f9b2dc3ac93141bf0, server=407411ea95db,44619,1732255321573 because future has completed 2024-11-22T06:04:45,058 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithChecksum/ad39bbcb25203ccdc96629b51aad1e82 2024-11-22T06:04:45,058 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=178, resume processing ppid=177 2024-11-22T06:04:45,058 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=178, ppid=177, state=SUCCESS, hasLock=false; OpenRegionProcedure d0d7aed9add9567f9b2dc3ac93141bf0, server=407411ea95db,44619,1732255321573 in 183 msec 2024-11-22T06:04:45,058 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithChecksum,1,1732255484642.d0d7aed9add9567f9b2dc3ac93141bf0. 2024-11-22T06:04:45,058 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] handler.AssignRegionHandler(153): Opened testtb-testExportWithChecksum,1,1732255484642.d0d7aed9add9567f9b2dc3ac93141bf0. 
2024-11-22T06:04:45,060 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(1048): stopping wal replay for ad39bbcb25203ccdc96629b51aad1e82 2024-11-22T06:04:45,060 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(1060): Cleaning up temporary data for ad39bbcb25203ccdc96629b51aad1e82 2024-11-22T06:04:45,060 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=177, ppid=175, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=d0d7aed9add9567f9b2dc3ac93141bf0, ASSIGN in 342 msec 2024-11-22T06:04:45,062 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(1093): writing seq id for ad39bbcb25203ccdc96629b51aad1e82 2024-11-22T06:04:45,067 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithChecksum/ad39bbcb25203ccdc96629b51aad1e82/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T06:04:45,067 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(1114): Opened ad39bbcb25203ccdc96629b51aad1e82; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70873735, jitterRate=0.05610094964504242}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-22T06:04:45,068 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ad39bbcb25203ccdc96629b51aad1e82 2024-11-22T06:04:45,068 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(1006): Region open journal for ad39bbcb25203ccdc96629b51aad1e82: Running coprocessor pre-open hook at 1732255485034Writing region info on filesystem at 1732255485034Initializing all the Stores at 1732255485036 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732255485036Cleaning up temporary data from old regions at 1732255485060 (+24 ms)Running coprocessor post-open hooks at 1732255485068 (+8 ms)Region opened successfully at 1732255485068 2024-11-22T06:04:45,069 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithChecksum,,1732255484642.ad39bbcb25203ccdc96629b51aad1e82., pid=179, masterSystemTime=1732255485027 2024-11-22T06:04:45,071 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithChecksum,,1732255484642.ad39bbcb25203ccdc96629b51aad1e82. 
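[editor's note] In the records that follow, the second region finishes opening, the CreateTableProcedure runs CREATE_TABLE_UPDATE_DESC_CACHE and POST_OPERATION (writing the creator's acl entry "jenkins: RWXCA"), and once HBaseTestingUtil reports all regions assigned the test requests the FLUSH snapshot emptySnaptb0-testExportWithChecksum. The sketch below shows a wait-then-snapshot step against the public Admin API; the polling loop and class name are assumptions, since the test itself uses HBaseTestingUtil.waitUntilAllRegionsAssigned as logged below.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class SnapshotChecksumTestTable {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("testtb-testExportWithChecksum");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Poll until every region of the new table is online; the log below shows the
      // test waiting up to 60000ms for the same condition.
      long deadline = System.currentTimeMillis() + 60_000L;
      while (!admin.isTableAvailable(table)) {
        if (System.currentTimeMillis() > deadline) {
          throw new IllegalStateException("Regions of " + table + " not assigned in time");
        }
        Thread.sleep(200);
      }
      // Flush-type snapshot of the still-empty table, matching the snapshot request record below.
      admin.snapshot("emptySnaptb0-testExportWithChecksum", table);
    }
  }
}
```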
2024-11-22T06:04:45,072 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] handler.AssignRegionHandler(153): Opened testtb-testExportWithChecksum,,1732255484642.ad39bbcb25203ccdc96629b51aad1e82. 2024-11-22T06:04:45,072 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=176 updating hbase:meta row=ad39bbcb25203ccdc96629b51aad1e82, regionState=OPEN, openSeqNum=2, regionLocation=407411ea95db,41799,1732255321718 2024-11-22T06:04:45,076 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=179, ppid=176, state=RUNNABLE, hasLock=false; OpenRegionProcedure ad39bbcb25203ccdc96629b51aad1e82, server=407411ea95db,41799,1732255321718 because future has completed 2024-11-22T06:04:45,082 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=179, resume processing ppid=176 2024-11-22T06:04:45,082 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=179, ppid=176, state=SUCCESS, hasLock=false; OpenRegionProcedure ad39bbcb25203ccdc96629b51aad1e82, server=407411ea95db,41799,1732255321718 in 203 msec 2024-11-22T06:04:45,085 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=176, resume processing ppid=175 2024-11-22T06:04:45,085 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=176, ppid=175, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=ad39bbcb25203ccdc96629b51aad1e82, ASSIGN in 366 msec 2024-11-22T06:04:45,086 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=175, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-22T06:04:45,086 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732255485086"}]},"ts":"1732255485086"} 2024-11-22T06:04:45,088 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=ENABLED in hbase:meta 2024-11-22T06:04:45,089 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=175, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_POST_OPERATION 2024-11-22T06:04:45,089 DEBUG [PEWorker-4 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithChecksum jenkins: RWXCA 2024-11-22T06:04:45,093 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41799 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-11-22T06:04:45,149 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44619-0x101626c19a90001, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:04:45,149 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:04:45,149 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41799-0x101626c19a90002, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase/acl 2024-11-22T06:04:45,149 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35283-0x101626c19a90003, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:04:45,184 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-22T06:04:45,184 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-11-22T06:04:45,190 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=175, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithChecksum in 537 msec 2024-11-22T06:04:45,191 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-22T06:04:45,191 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-22T06:04:45,191 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-11-22T06:04:45,191 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-11-22T06:04:45,192 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-22T06:04:45,192 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-11-22T06:04:45,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=175 2024-11-22T06:04:45,279 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithChecksum completed 2024-11-22T06:04:45,279 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportWithChecksum get assigned. 
Timeout = 60000ms 2024-11-22T06:04:45,279 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-22T06:04:45,283 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportWithChecksum assigned to meta. Checking AM states. 2024-11-22T06:04:45,283 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-22T06:04:45,283 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportWithChecksum assigned. 2024-11-22T06:04:45,283 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-22T06:04:45,286 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-11-22T06:04:45,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732255485286 (current time:1732255485286). 2024-11-22T06:04:45,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-22T06:04:45,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithChecksum VERSION not specified, setting to 2 2024-11-22T06:04:45,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-22T06:04:45,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e0293d2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:04:45,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] client.ClusterIdFetcher(90): Going to request 407411ea95db,40429,-1 for getting cluster id 2024-11-22T06:04:45,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T06:04:45,288 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '6eeb2921-6f20-4de7-bcc0-81ebecd4007b' 2024-11-22T06:04:45,288 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T06:04:45,288 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "6eeb2921-6f20-4de7-bcc0-81ebecd4007b" 2024-11-22T06:04:45,289 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@54ae213a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:04:45,289 
DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [407411ea95db,40429,-1] 2024-11-22T06:04:45,289 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T06:04:45,289 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:04:45,290 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58094, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T06:04:45,290 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3000ce5f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:04:45,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T06:04:45,291 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=407411ea95db,41799,1732255321718, seqNum=-1] 2024-11-22T06:04:45,291 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T06:04:45,293 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57826, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T06:04:45,294 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429. 
2024-11-22T06:04:45,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-22T06:04:45,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:04:45,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:04:45,294 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-22T06:04:45,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@380cccb1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:04:45,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] client.ClusterIdFetcher(90): Going to request 407411ea95db,40429,-1 for getting cluster id 2024-11-22T06:04:45,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T06:04:45,296 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '6eeb2921-6f20-4de7-bcc0-81ebecd4007b' 2024-11-22T06:04:45,296 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T06:04:45,296 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "6eeb2921-6f20-4de7-bcc0-81ebecd4007b" 2024-11-22T06:04:45,296 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1e8124, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:04:45,296 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [407411ea95db,40429,-1] 2024-11-22T06:04:45,296 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T06:04:45,297 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:04:45,297 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58114, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T06:04:45,298 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6c05946a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:04:45,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T06:04:45,299 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=407411ea95db,41799,1732255321718, seqNum=-1] 2024-11-22T06:04:45,299 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T06:04:45,301 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57834, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T06:04:45,302 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithChecksum', locateType=CURRENT is [region=hbase:acl,,1732255324607.c2a1af531af5dfa747044be7409c1d27., hostname=407411ea95db,41799,1732255321718, seqNum=2] 2024-11-22T06:04:45,304 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429. 
2024-11-22T06:04:45,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor220.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-22T06:04:45,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:04:45,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:04:45,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-11-22T06:04:45,305 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-22T06:04:45,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-11-22T06:04:45,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] procedure2.ProcedureExecutor(1139): Stored pid=180, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-11-22T06:04:45,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 180 2024-11-22T06:04:45,308 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-22T06:04:45,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-11-22T06:04:45,309 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-22T06:04:45,315 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-22T06:04:45,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742217_1393 (size=161) 2024-11-22T06:04:45,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742217_1393 (size=161) 2024-11-22T06:04:45,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742217_1393 (size=161) 2024-11-22T06:04:45,338 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-22T06:04:45,338 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=181, ppid=180, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ad39bbcb25203ccdc96629b51aad1e82}, {pid=182, ppid=180, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d0d7aed9add9567f9b2dc3ac93141bf0}] 2024-11-22T06:04:45,340 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=182, ppid=180, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d0d7aed9add9567f9b2dc3ac93141bf0 2024-11-22T06:04:45,340 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=181, ppid=180, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ad39bbcb25203ccdc96629b51aad1e82 2024-11-22T06:04:45,419 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-11-22T06:04:45,492 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44619 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=182 2024-11-22T06:04:45,492 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41799 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=181 2024-11-22T06:04:45,492 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,1,1732255484642.d0d7aed9add9567f9b2dc3ac93141bf0. 2024-11-22T06:04:45,492 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] regionserver.HRegion(2603): Flush status journal for d0d7aed9add9567f9b2dc3ac93141bf0: 2024-11-22T06:04:45,492 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,,1732255484642.ad39bbcb25203ccdc96629b51aad1e82. 2024-11-22T06:04:45,492 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,1,1732255484642.d0d7aed9add9567f9b2dc3ac93141bf0. for emptySnaptb0-testExportWithChecksum completed. 2024-11-22T06:04:45,492 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] regionserver.HRegion(2603): Flush status journal for ad39bbcb25203ccdc96629b51aad1e82: 2024-11-22T06:04:45,492 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,1,1732255484642.d0d7aed9add9567f9b2dc3ac93141bf0.' region-info for snapshot=emptySnaptb0-testExportWithChecksum 2024-11-22T06:04:45,492 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,,1732255484642.ad39bbcb25203ccdc96629b51aad1e82. for emptySnaptb0-testExportWithChecksum completed. 2024-11-22T06:04:45,493 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-22T06:04:45,493 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-22T06:04:45,493 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,,1732255484642.ad39bbcb25203ccdc96629b51aad1e82.' 
region-info for snapshot=emptySnaptb0-testExportWithChecksum 2024-11-22T06:04:45,493 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-22T06:04:45,493 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-22T06:04:45,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742218_1394 (size=68) 2024-11-22T06:04:45,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742218_1394 (size=68) 2024-11-22T06:04:45,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742218_1394 (size=68) 2024-11-22T06:04:45,508 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,1,1732255484642.d0d7aed9add9567f9b2dc3ac93141bf0. 2024-11-22T06:04:45,508 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=182 2024-11-22T06:04:45,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.HMaster(4169): Remote procedure done, pid=182 2024-11-22T06:04:45,509 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithChecksum on region d0d7aed9add9567f9b2dc3ac93141bf0 2024-11-22T06:04:45,509 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=182, ppid=180, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d0d7aed9add9567f9b2dc3ac93141bf0 2024-11-22T06:04:45,512 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=182, ppid=180, state=SUCCESS, hasLock=false; SnapshotRegionProcedure d0d7aed9add9567f9b2dc3ac93141bf0 in 173 msec 2024-11-22T06:04:45,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742219_1395 (size=68) 2024-11-22T06:04:45,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742219_1395 (size=68) 2024-11-22T06:04:45,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742219_1395 (size=68) 2024-11-22T06:04:45,528 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,,1732255484642.ad39bbcb25203ccdc96629b51aad1e82. 
2024-11-22T06:04:45,528 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=181 2024-11-22T06:04:45,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.HMaster(4169): Remote procedure done, pid=181 2024-11-22T06:04:45,529 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithChecksum on region ad39bbcb25203ccdc96629b51aad1e82 2024-11-22T06:04:45,529 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=181, ppid=180, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ad39bbcb25203ccdc96629b51aad1e82 2024-11-22T06:04:45,534 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=181, resume processing ppid=180 2024-11-22T06:04:45,534 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-22T06:04:45,534 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=181, ppid=180, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ad39bbcb25203ccdc96629b51aad1e82 in 192 msec 2024-11-22T06:04:45,535 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-22T06:04:45,536 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-22T06:04:45,536 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithChecksum 2024-11-22T06:04:45,537 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithChecksum 2024-11-22T06:04:45,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742220_1396 (size=543) 2024-11-22T06:04:45,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742220_1396 (size=543) 2024-11-22T06:04:45,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742220_1396 (size=543) 2024-11-22T06:04:45,565 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-22T06:04:45,576 INFO 
[PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-22T06:04:45,576 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithChecksum to hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/emptySnaptb0-testExportWithChecksum 2024-11-22T06:04:45,578 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-22T06:04:45,578 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 180 2024-11-22T06:04:45,581 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=180, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } in 272 msec 2024-11-22T06:04:45,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-11-22T06:04:45,630 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithChecksum completed 2024-11-22T06:04:45,634 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithChecksum', row='0c25a1d7fb4ba9d402c9209cb03a6b647', locateType=CURRENT is [region=testtb-testExportWithChecksum,,1732255484642.ad39bbcb25203ccdc96629b51aad1e82., hostname=407411ea95db,41799,1732255321718, seqNum=2] 2024-11-22T06:04:45,635 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithChecksum', row='109b0c4e3a32dbc7e2a9f4f0074ff5871', locateType=CURRENT is [region=testtb-testExportWithChecksum,1,1732255484642.d0d7aed9add9567f9b2dc3ac93141bf0., hostname=407411ea95db,44619,1732255321573, seqNum=2] 2024-11-22T06:04:45,639 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithChecksum', row='27a48d5fc854c5da1f0356661cb2bddc4', locateType=CURRENT is [region=testtb-testExportWithChecksum,1,1732255484642.d0d7aed9add9567f9b2dc3ac93141bf0., hostname=407411ea95db,44619,1732255321573, seqNum=2] 2024-11-22T06:04:45,640 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithChecksum', row='3e4c67ace4e64e0b7cf5ddc05d7171f41', locateType=CURRENT is [region=testtb-testExportWithChecksum,1,1732255484642.d0d7aed9add9567f9b2dc3ac93141bf0., hostname=407411ea95db,44619,1732255321573, seqNum=2] 2024-11-22T06:04:45,640 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] 
client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithChecksum', row='457e6a5bae63c8c8a5d32127908f93086', locateType=CURRENT is [region=testtb-testExportWithChecksum,1,1732255484642.d0d7aed9add9567f9b2dc3ac93141bf0., hostname=407411ea95db,44619,1732255321573, seqNum=2] 2024-11-22T06:04:45,644 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41799 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithChecksum,,1732255484642.ad39bbcb25203ccdc96629b51aad1e82. with WAL disabled. Data may be lost in the event of a crash. 2024-11-22T06:04:45,647 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44619 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithChecksum,1,1732255484642.d0d7aed9add9567f9b2dc3ac93141bf0. with WAL disabled. Data may be lost in the event of a crash. 2024-11-22T06:04:45,649 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-22T06:04:45,651 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithChecksum 2024-11-22T06:04:45,651 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithChecksum,,1732255484642.ad39bbcb25203ccdc96629b51aad1e82. 2024-11-22T06:04:45,652 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-22T06:04:45,654 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-22T06:04:45,659 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-22T06:04:45,666 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-22T06:04:45,669 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-11-22T06:04:45,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732255485670 (current time:1732255485670). 
2024-11-22T06:04:45,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-22T06:04:45,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithChecksum VERSION not specified, setting to 2 2024-11-22T06:04:45,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-22T06:04:45,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@8844d66, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:04:45,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] client.ClusterIdFetcher(90): Going to request 407411ea95db,40429,-1 for getting cluster id 2024-11-22T06:04:45,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T06:04:45,672 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '6eeb2921-6f20-4de7-bcc0-81ebecd4007b' 2024-11-22T06:04:45,672 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T06:04:45,672 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "6eeb2921-6f20-4de7-bcc0-81ebecd4007b" 2024-11-22T06:04:45,672 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@451380d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:04:45,672 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [407411ea95db,40429,-1] 2024-11-22T06:04:45,673 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T06:04:45,673 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:04:45,674 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58136, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T06:04:45,675 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1291818a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:04:45,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T06:04:45,676 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] 
client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=407411ea95db,41799,1732255321718, seqNum=-1] 2024-11-22T06:04:45,676 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T06:04:45,678 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57844, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T06:04:45,680 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429. 2024-11-22T06:04:45,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-22T06:04:45,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:04:45,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:04:45,681 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-22T06:04:45,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1ddee9bf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:04:45,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] client.ClusterIdFetcher(90): Going to request 407411ea95db,40429,-1 for getting cluster id 2024-11-22T06:04:45,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T06:04:45,683 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '6eeb2921-6f20-4de7-bcc0-81ebecd4007b' 2024-11-22T06:04:45,683 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T06:04:45,683 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "6eeb2921-6f20-4de7-bcc0-81ebecd4007b" 2024-11-22T06:04:45,683 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@104ccdc7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:04:45,683 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [407411ea95db,40429,-1] 2024-11-22T06:04:45,683 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T06:04:45,684 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:04:45,684 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58142, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T06:04:45,685 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@10142ae6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:04:45,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T06:04:45,687 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=407411ea95db,41799,1732255321718, seqNum=-1] 2024-11-22T06:04:45,687 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T06:04:45,689 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57852, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-22T06:04:45,691 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithChecksum', locateType=CURRENT is [region=hbase:acl,,1732255324607.c2a1af531af5dfa747044be7409c1d27., hostname=407411ea95db,41799,1732255321718, seqNum=2] 2024-11-22T06:04:45,692 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429. 2024-11-22T06:04:45,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor220.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-22T06:04:45,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:04:45,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:04:45,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 
2024-11-22T06:04:45,693 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-22T06:04:45,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-22T06:04:45,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] procedure2.ProcedureExecutor(1139): Stored pid=183, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-11-22T06:04:45,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 183 2024-11-22T06:04:45,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=183 2024-11-22T06:04:45,697 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-22T06:04:45,699 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-22T06:04:45,711 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-22T06:04:45,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742221_1397 (size=156) 2024-11-22T06:04:45,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742221_1397 (size=156) 2024-11-22T06:04:45,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742221_1397 (size=156) 2024-11-22T06:04:45,737 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-22T06:04:45,737 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=184, ppid=183, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ad39bbcb25203ccdc96629b51aad1e82}, {pid=185, ppid=183, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d0d7aed9add9567f9b2dc3ac93141bf0}] 2024-11-22T06:04:45,743 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=184, ppid=183, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 
ad39bbcb25203ccdc96629b51aad1e82 2024-11-22T06:04:45,743 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=185, ppid=183, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d0d7aed9add9567f9b2dc3ac93141bf0 2024-11-22T06:04:45,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=183 2024-11-22T06:04:45,895 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44619 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=185 2024-11-22T06:04:45,895 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41799 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=184 2024-11-22T06:04:45,895 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,1,1732255484642.d0d7aed9add9567f9b2dc3ac93141bf0. 2024-11-22T06:04:45,896 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,,1732255484642.ad39bbcb25203ccdc96629b51aad1e82. 2024-11-22T06:04:45,896 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.HRegion(2902): Flushing ad39bbcb25203ccdc96629b51aad1e82 1/1 column families, dataSize=132 B heapSize=544 B 2024-11-22T06:04:45,896 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.HRegion(2902): Flushing d0d7aed9add9567f9b2dc3ac93141bf0 1/1 column families, dataSize=3.13 KB heapSize=7 KB 2024-11-22T06:04:45,911 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithChecksum/d0d7aed9add9567f9b2dc3ac93141bf0/.tmp/cf/b9d13a9cfc0a463cb4e8ec4ea37e135c is 71, key is 104a3f6f619e13a162a347600fb80132/cf:q/1732255485647/Put/seqid=0 2024-11-22T06:04:45,916 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithChecksum/ad39bbcb25203ccdc96629b51aad1e82/.tmp/cf/5d14d49f369e470db0fdbec8ae0ee5d3 is 71, key is 0a9afdda1b5549ee6fbf4d2a7541db06/cf:q/1732255485644/Put/seqid=0 2024-11-22T06:04:45,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742222_1398 (size=8392) 2024-11-22T06:04:45,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742222_1398 (size=8392) 2024-11-22T06:04:45,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742222_1398 (size=8392) 2024-11-22T06:04:45,918 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.DefaultStoreFlusher(81): Flushed 
memstore data size=3.13 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithChecksum/d0d7aed9add9567f9b2dc3ac93141bf0/.tmp/cf/b9d13a9cfc0a463cb4e8ec4ea37e135c 2024-11-22T06:04:45,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742223_1399 (size=5216) 2024-11-22T06:04:45,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742223_1399 (size=5216) 2024-11-22T06:04:45,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742223_1399 (size=5216) 2024-11-22T06:04:45,923 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=132 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithChecksum/ad39bbcb25203ccdc96629b51aad1e82/.tmp/cf/5d14d49f369e470db0fdbec8ae0ee5d3 2024-11-22T06:04:45,924 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithChecksum/d0d7aed9add9567f9b2dc3ac93141bf0/.tmp/cf/b9d13a9cfc0a463cb4e8ec4ea37e135c as hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithChecksum/d0d7aed9add9567f9b2dc3ac93141bf0/cf/b9d13a9cfc0a463cb4e8ec4ea37e135c 2024-11-22T06:04:45,929 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithChecksum/ad39bbcb25203ccdc96629b51aad1e82/.tmp/cf/5d14d49f369e470db0fdbec8ae0ee5d3 as hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithChecksum/ad39bbcb25203ccdc96629b51aad1e82/cf/5d14d49f369e470db0fdbec8ae0ee5d3 2024-11-22T06:04:45,931 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithChecksum/d0d7aed9add9567f9b2dc3ac93141bf0/cf/b9d13a9cfc0a463cb4e8ec4ea37e135c, entries=48, sequenceid=6, filesize=8.2 K 2024-11-22T06:04:45,932 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.HRegion(3140): Finished flush of dataSize ~3.13 KB/3204, heapSize ~6.98 KB/7152, currentSize=0 B/0 for d0d7aed9add9567f9b2dc3ac93141bf0 in 36ms, sequenceid=6, compaction requested=false 2024-11-22T06:04:45,932 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithChecksum' 2024-11-22T06:04:45,932 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.HRegion(2603): Flush status journal for 
d0d7aed9add9567f9b2dc3ac93141bf0: 2024-11-22T06:04:45,932 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,1,1732255484642.d0d7aed9add9567f9b2dc3ac93141bf0. for snaptb0-testExportWithChecksum completed. 2024-11-22T06:04:45,932 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,1,1732255484642.d0d7aed9add9567f9b2dc3ac93141bf0.' region-info for snapshot=snaptb0-testExportWithChecksum 2024-11-22T06:04:45,932 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-22T06:04:45,932 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithChecksum/d0d7aed9add9567f9b2dc3ac93141bf0/cf/b9d13a9cfc0a463cb4e8ec4ea37e135c] hfiles 2024-11-22T06:04:45,932 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithChecksum/d0d7aed9add9567f9b2dc3ac93141bf0/cf/b9d13a9cfc0a463cb4e8ec4ea37e135c for snapshot=snaptb0-testExportWithChecksum 2024-11-22T06:04:45,935 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithChecksum/ad39bbcb25203ccdc96629b51aad1e82/cf/5d14d49f369e470db0fdbec8ae0ee5d3, entries=2, sequenceid=6, filesize=5.1 K 2024-11-22T06:04:45,936 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.HRegion(3140): Finished flush of dataSize ~132 B/132, heapSize ~528 B/528, currentSize=0 B/0 for ad39bbcb25203ccdc96629b51aad1e82 in 40ms, sequenceid=6, compaction requested=false 2024-11-22T06:04:45,936 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.HRegion(2603): Flush status journal for ad39bbcb25203ccdc96629b51aad1e82: 2024-11-22T06:04:45,936 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,,1732255484642.ad39bbcb25203ccdc96629b51aad1e82. for snaptb0-testExportWithChecksum completed. 2024-11-22T06:04:45,936 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,,1732255484642.ad39bbcb25203ccdc96629b51aad1e82.' 
region-info for snapshot=snaptb0-testExportWithChecksum 2024-11-22T06:04:45,936 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-22T06:04:45,936 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithChecksum/ad39bbcb25203ccdc96629b51aad1e82/cf/5d14d49f369e470db0fdbec8ae0ee5d3] hfiles 2024-11-22T06:04:45,936 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithChecksum/ad39bbcb25203ccdc96629b51aad1e82/cf/5d14d49f369e470db0fdbec8ae0ee5d3 for snapshot=snaptb0-testExportWithChecksum 2024-11-22T06:04:45,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742224_1400 (size=107) 2024-11-22T06:04:45,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742224_1400 (size=107) 2024-11-22T06:04:45,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742224_1400 (size=107) 2024-11-22T06:04:45,940 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,1,1732255484642.d0d7aed9add9567f9b2dc3ac93141bf0. 
2024-11-22T06:04:45,940 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=185 2024-11-22T06:04:45,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.HMaster(4169): Remote procedure done, pid=185 2024-11-22T06:04:45,941 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithChecksum on region d0d7aed9add9567f9b2dc3ac93141bf0 2024-11-22T06:04:45,941 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=185, ppid=183, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d0d7aed9add9567f9b2dc3ac93141bf0 2024-11-22T06:04:45,943 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=185, ppid=183, state=SUCCESS, hasLock=false; SnapshotRegionProcedure d0d7aed9add9567f9b2dc3ac93141bf0 in 205 msec 2024-11-22T06:04:45,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742225_1401 (size=107) 2024-11-22T06:04:45,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742225_1401 (size=107) 2024-11-22T06:04:45,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742225_1401 (size=107) 2024-11-22T06:04:45,947 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,,1732255484642.ad39bbcb25203ccdc96629b51aad1e82. 
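The entries above show the two SnapshotRegionProcedure tasks flushing their memstores to hfiles, committing the files from .tmp into the cf directory, and recording references in the snapshot manifest for snaptb0-testExportWithChecksum. For reference, a snapshot of this kind is normally requested through the HBase Admin API; the following is only an illustrative sketch, and the class name and connection settings are assumptions, not values taken from this run:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TakeFlushSnapshot {                        // hypothetical driver class
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();   // assumes hbase-site.xml on the classpath
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // For an enabled table this takes a flush-type snapshot, matching the
      // "type=FLUSH" descriptor logged for snaptb0-testExportWithChecksum above.
      admin.snapshot("snaptb0-testExportWithChecksum",
          TableName.valueOf("testtb-testExportWithChecksum"));
    }
  }
}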
2024-11-22T06:04:45,947 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=184 2024-11-22T06:04:45,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.HMaster(4169): Remote procedure done, pid=184 2024-11-22T06:04:45,947 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithChecksum on region ad39bbcb25203ccdc96629b51aad1e82 2024-11-22T06:04:45,948 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=184, ppid=183, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ad39bbcb25203ccdc96629b51aad1e82 2024-11-22T06:04:45,951 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=184, resume processing ppid=183 2024-11-22T06:04:45,951 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=184, ppid=183, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ad39bbcb25203ccdc96629b51aad1e82 in 211 msec 2024-11-22T06:04:45,951 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-22T06:04:45,952 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-22T06:04:45,953 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-22T06:04:45,953 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithChecksum 2024-11-22T06:04:45,954 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-11-22T06:04:45,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742226_1402 (size=621) 2024-11-22T06:04:45,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742226_1402 (size=621) 2024-11-22T06:04:45,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742226_1402 (size=621) 2024-11-22T06:04:45,978 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-22T06:04:45,983 INFO [PEWorker-2 {}] 
procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-22T06:04:45,984 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum to hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-11-22T06:04:45,985 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-22T06:04:45,985 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 183 2024-11-22T06:04:45,987 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=183, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } in 291 msec 2024-11-22T06:04:46,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=183 2024-11-22T06:04:46,010 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithChecksum completed 2024-11-22T06:04:46,010 INFO [Time-limited test {}] snapshot.TestExportSnapshot(475): Local export destination path: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/local-export-1732255486010 2024-11-22T06:04:46,010 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/local-export-1732255486010, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/local-export-1732255486010, srcFsUri=hdfs://localhost:33847, srcDir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788 2024-11-22T06:04:46,038 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:33847, inputRoot=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788 2024-11-22T06:04:46,038 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=org.apache.hadoop.fs.LocalFileSystem@888ac35, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/local-export-1732255486010, skipTmp=false, 
initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/local-export-1732255486010/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-11-22T06:04:46,039 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 2024-11-22T06:04:46,042 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/snaptb0-testExportWithChecksum to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/local-export-1732255486010/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-11-22T06:04:46,068 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:04:46,069 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:04:46,069 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:04:47,270 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/hadoop-7029428865010964987.jar 2024-11-22T06:04:47,270 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:04:47,270 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:04:47,353 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/hadoop-10603832161678980912.jar 2024-11-22T06:04:47,353 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:04:47,354 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:04:47,354 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): 
For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:04:47,354 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:04:47,355 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:04:47,355 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:04:47,355 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-22T06:04:47,356 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-22T06:04:47,356 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-22T06:04:47,356 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-22T06:04:47,357 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-22T06:04:47,357 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-22T06:04:47,357 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-22T06:04:47,358 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 
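The TableMapReduceUtil entries above record the export job resolving the jars it will ship with the MapReduce job; the export itself targets the local file:// destination logged at 06:04:46. A comparable programmatic invocation would go through ToolRunner, which is the same call path that appears in the failure stack trace at the end of this section. This is a sketch only; the destination path and mapper count are placeholders, not values from this run:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class RunSnapshotExport {                        // hypothetical driver class
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Same call path as in the stack trace below:
    // ToolRunner.run -> AbstractHBaseTool.run -> ExportSnapshot.doWork -> runCopyJob.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportWithChecksum",
        "-copy-to", "file:///tmp/local-export",         // placeholder destination
        "-mappers", "1"                                 // placeholder parallelism
    });
    System.exit(rc);
  }
}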
2024-11-22T06:04:47,358 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-22T06:04:47,358 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-22T06:04:47,359 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-22T06:04:47,359 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-22T06:04:47,360 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-22T06:04:47,360 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-22T06:04:47,360 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-22T06:04:47,360 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-22T06:04:47,361 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-22T06:04:47,361 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-22T06:04:47,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742227_1403 (size=131440) 2024-11-22T06:04:47,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742227_1403 (size=131440) 2024-11-22T06:04:47,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is 
added to blk_1073742227_1403 (size=131440) 2024-11-22T06:04:47,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742228_1404 (size=4188619) 2024-11-22T06:04:47,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742228_1404 (size=4188619) 2024-11-22T06:04:47,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742228_1404 (size=4188619) 2024-11-22T06:04:47,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742229_1405 (size=1323991) 2024-11-22T06:04:47,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742229_1405 (size=1323991) 2024-11-22T06:04:47,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742229_1405 (size=1323991) 2024-11-22T06:04:47,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742230_1406 (size=903736) 2024-11-22T06:04:47,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742230_1406 (size=903736) 2024-11-22T06:04:47,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742230_1406 (size=903736) 2024-11-22T06:04:47,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742231_1407 (size=8360083) 2024-11-22T06:04:47,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742231_1407 (size=8360083) 2024-11-22T06:04:47,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742231_1407 (size=8360083) 2024-11-22T06:04:47,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742232_1408 (size=440656) 2024-11-22T06:04:47,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742232_1408 (size=440656) 2024-11-22T06:04:47,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742232_1408 (size=440656) 2024-11-22T06:04:47,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742233_1409 (size=1877034) 2024-11-22T06:04:47,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742233_1409 (size=1877034) 2024-11-22T06:04:47,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742233_1409 (size=1877034) 2024-11-22T06:04:47,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742234_1410 (size=77835) 2024-11-22T06:04:47,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:35893 is added to blk_1073742234_1410 (size=77835) 2024-11-22T06:04:47,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742234_1410 (size=77835) 2024-11-22T06:04:47,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742235_1411 (size=30949) 2024-11-22T06:04:47,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742235_1411 (size=30949) 2024-11-22T06:04:47,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742235_1411 (size=30949) 2024-11-22T06:04:47,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742236_1412 (size=1597347) 2024-11-22T06:04:47,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742236_1412 (size=1597347) 2024-11-22T06:04:47,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742236_1412 (size=1597347) 2024-11-22T06:04:47,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742237_1413 (size=4695811) 2024-11-22T06:04:47,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742237_1413 (size=4695811) 2024-11-22T06:04:47,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742237_1413 (size=4695811) 2024-11-22T06:04:47,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742238_1414 (size=232957) 2024-11-22T06:04:47,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742238_1414 (size=232957) 2024-11-22T06:04:47,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742238_1414 (size=232957) 2024-11-22T06:04:47,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742239_1415 (size=127628) 2024-11-22T06:04:47,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742239_1415 (size=127628) 2024-11-22T06:04:47,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742239_1415 (size=127628) 2024-11-22T06:04:47,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742240_1416 (size=20406) 2024-11-22T06:04:47,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742240_1416 (size=20406) 2024-11-22T06:04:47,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742240_1416 (size=20406) 2024-11-22T06:04:47,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:43345 is added to blk_1073742241_1417 (size=5175431) 2024-11-22T06:04:47,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742241_1417 (size=5175431) 2024-11-22T06:04:47,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742241_1417 (size=5175431) 2024-11-22T06:04:47,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742242_1418 (size=217634) 2024-11-22T06:04:47,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742242_1418 (size=217634) 2024-11-22T06:04:47,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742242_1418 (size=217634) 2024-11-22T06:04:47,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742243_1419 (size=1832290) 2024-11-22T06:04:47,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742243_1419 (size=1832290) 2024-11-22T06:04:47,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742243_1419 (size=1832290) 2024-11-22T06:04:47,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742244_1420 (size=322274) 2024-11-22T06:04:47,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742244_1420 (size=322274) 2024-11-22T06:04:47,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742244_1420 (size=322274) 2024-11-22T06:04:47,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742245_1421 (size=503880) 2024-11-22T06:04:47,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742245_1421 (size=503880) 2024-11-22T06:04:47,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742245_1421 (size=503880) 2024-11-22T06:04:47,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742246_1422 (size=29229) 2024-11-22T06:04:47,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742246_1422 (size=29229) 2024-11-22T06:04:47,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742246_1422 (size=29229) 2024-11-22T06:04:47,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742247_1423 (size=24096) 2024-11-22T06:04:47,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742247_1423 (size=24096) 2024-11-22T06:04:47,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742247_1423 (size=24096) 2024-11-22T06:04:47,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742248_1424 (size=111872) 2024-11-22T06:04:47,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742248_1424 (size=111872) 2024-11-22T06:04:47,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742248_1424 (size=111872) 2024-11-22T06:04:47,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742249_1425 (size=6424744) 2024-11-22T06:04:47,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742249_1425 (size=6424744) 2024-11-22T06:04:47,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742249_1425 (size=6424744) 2024-11-22T06:04:47,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742250_1426 (size=45609) 2024-11-22T06:04:47,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742250_1426 (size=45609) 2024-11-22T06:04:47,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742250_1426 (size=45609) 2024-11-22T06:04:47,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742251_1427 (size=136454) 2024-11-22T06:04:47,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742251_1427 (size=136454) 2024-11-22T06:04:47,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742251_1427 (size=136454) 2024-11-22T06:04:47,761 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
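The JobResourceUploader warning above is expected in this in-JVM mini-cluster run, where no job jar is set. In a standalone driver the jar would normally be declared before submission, as the warning's pointer to Job#setJar(String) suggests. A minimal sketch; the class, job name, and path are hypothetical:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;

public class JobJarSetup {                              // hypothetical driver class
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Job job = Job.getInstance(conf, "snapshot-export"); // placeholder job name
    // Either point Hadoop at the jar that contains the job's classes...
    job.setJarByClass(JobJarSetup.class);
    // ...or name the jar file explicitly, as the warning suggests:
    // job.setJar("/path/to/job.jar");                  // placeholder path
  }
}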
2024-11-22T06:04:47,763 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'snaptb0-testExportWithChecksum' hfile list 2024-11-22T06:04:47,765 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(754): export split=0 size=13.3 K 2024-11-22T06:04:47,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742252_1428 (size=338) 2024-11-22T06:04:47,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742252_1428 (size=338) 2024-11-22T06:04:47,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742252_1428 (size=338) 2024-11-22T06:04:47,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742253_1429 (size=15) 2024-11-22T06:04:47,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742253_1429 (size=15) 2024-11-22T06:04:47,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742253_1429 (size=15) 2024-11-22T06:04:47,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742254_1430 (size=303778) 2024-11-22T06:04:47,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742254_1430 (size=303778) 2024-11-22T06:04:47,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742254_1430 (size=303778) 2024-11-22T06:04:48,174 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-22T06:04:48,174 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-22T06:04:48,177 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732255328437_0007_000001 (auth:SIMPLE) from 127.0.0.1:42334 2024-11-22T06:04:48,187 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1754703613/yarn-5946224054/MiniMRCluster_1754703613-localDir-nm-1_1/usercache/jenkins/appcache/application_1732255328437_0007/container_1732255328437_0007_01_000001/launch_container.sh] 2024-11-22T06:04:48,187 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1754703613/yarn-5946224054/MiniMRCluster_1754703613-localDir-nm-1_1/usercache/jenkins/appcache/application_1732255328437_0007/container_1732255328437_0007_01_000001/container_tokens] 2024-11-22T06:04:48,187 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1754703613/yarn-5946224054/MiniMRCluster_1754703613-localDir-nm-1_1/usercache/jenkins/appcache/application_1732255328437_0007/container_1732255328437_0007_01_000001/sysfs] 2024-11-22T06:04:48,911 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732255328437_0008_000001 (auth:SIMPLE) from 127.0.0.1:46092 2024-11-22T06:04:49,795 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-22T06:04:51,126 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum 2024-11-22T06:04:51,126 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum Metrics about Tables on a single HBase RegionServer 2024-11-22T06:04:51,127 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState 2024-11-22T06:04:54,916 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732255328437_0008_000001 (auth:SIMPLE) from 127.0.0.1:50242 2024-11-22T06:04:55,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742255_1431 (size=349428) 2024-11-22T06:04:55,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742255_1431 (size=349428) 2024-11-22T06:04:55,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742255_1431 (size=349428) 2024-11-22T06:04:56,629 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-22T06:04:57,185 INFO [Socket Reader #1 for port 0 {}] 
ipc.Server$Connection(2290): Auth successful for appattempt_1732255328437_0008_000001 (auth:SIMPLE) from 127.0.0.1:38968 2024-11-22T06:04:59,721 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-22T06:05:01,166 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1754703613/yarn-5946224054/MiniMRCluster_1754703613-localDir-nm-1_1/usercache/jenkins/appcache/application_1732255328437_0008/container_1732255328437_0008_01_000002/launch_container.sh] 2024-11-22T06:05:01,166 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1754703613/yarn-5946224054/MiniMRCluster_1754703613-localDir-nm-1_1/usercache/jenkins/appcache/application_1732255328437_0008/container_1732255328437_0008_01_000002/container_tokens] 2024-11-22T06:05:01,167 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1754703613/yarn-5946224054/MiniMRCluster_1754703613-localDir-nm-1_1/usercache/jenkins/appcache/application_1732255328437_0008/container_1732255328437_0008_01_000002/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithChecksum/d0d7aed9add9567f9b2dc3ac93141bf0/cf/b9d13a9cfc0a463cb4e8ec4ea37e135c and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/local-export-1732255486010/archive/data/default/testtb-testExportWithChecksum/d0d7aed9add9567f9b2dc3ac93141bf0/cf/b9d13a9cfc0a463cb4e8ec4ea37e135c. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:599) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:335) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:257) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:181) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-11-22T06:05:03,004 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732255328437_0008_000001 (auth:SIMPLE) from 127.0.0.1:40738 2024-11-22T06:05:03,230 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region c1407c3a5417180f7dcb0e325387e8f9, had cached 0 bytes from a total of 8188 2024-11-22T06:05:03,230 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region bcf300d151c9df6bfc8b093b51457a6c, had cached 0 bytes from a total of 5422 2024-11-22T06:05:06,139 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1754703613/yarn-5946224054/MiniMRCluster_1754703613-localDir-nm-1_2/usercache/jenkins/appcache/application_1732255328437_0008/container_1732255328437_0008_01_000003/launch_container.sh] 2024-11-22T06:05:06,139 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1754703613/yarn-5946224054/MiniMRCluster_1754703613-localDir-nm-1_2/usercache/jenkins/appcache/application_1732255328437_0008/container_1732255328437_0008_01_000003/container_tokens] 2024-11-22T06:05:06,139 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1754703613/yarn-5946224054/MiniMRCluster_1754703613-localDir-nm-1_2/usercache/jenkins/appcache/application_1732255328437_0008/container_1732255328437_0008_01_000003/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithChecksum/d0d7aed9add9567f9b2dc3ac93141bf0/cf/b9d13a9cfc0a463cb4e8ec4ea37e135c and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/local-export-1732255486010/archive/data/default/testtb-testExportWithChecksum/d0d7aed9add9567f9b2dc3ac93141bf0/cf/b9d13a9cfc0a463cb4e8ec4ea37e135c. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. 
You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.)
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:599)
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:335)
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:257)
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:181)
    at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145)
    at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800)
    at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348)
    at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172)
2024-11-22T06:05:08,017 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732255328437_0008_000001 (auth:SIMPLE) from 127.0.0.1:40750
2024-11-22T06:05:08,791 DEBUG [master/407411ea95db:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region c1407c3a5417180f7dcb0e325387e8f9 changed from -1.0 to 0.0, refreshing cache
2024-11-22T06:05:08,792 DEBUG [master/407411ea95db:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region ad39bbcb25203ccdc96629b51aad1e82 changed from -1.0 to 0.0, refreshing cache
2024-11-22T06:05:08,792 DEBUG [master/407411ea95db:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region bcf300d151c9df6bfc8b093b51457a6c changed from -1.0 to 0.0, refreshing cache
2024-11-22T06:05:08,792 DEBUG [master/407411ea95db:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region d0d7aed9add9567f9b2dc3ac93141bf0 changed from -1.0 to 0.0, refreshing cache
2024-11-22T06:05:10,933 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1754703613/yarn-5946224054/MiniMRCluster_1754703613-localDir-nm-1_2/usercache/jenkins/appcache/application_1732255328437_0008/container_1732255328437_0008_01_000004/launch_container.sh]
2024-11-22T06:05:10,933 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1754703613/yarn-5946224054/MiniMRCluster_1754703613-localDir-nm-1_2/usercache/jenkins/appcache/application_1732255328437_0008/container_1732255328437_0008_01_000004/container_tokens]
2024-11-22T06:05:10,933 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1754703613/yarn-5946224054/MiniMRCluster_1754703613-localDir-nm-1_2/usercache/jenkins/appcache/application_1732255328437_0008/container_1732255328437_0008_01_000004/sysfs]
Error: java.io.IOException: Checksum mismatch between hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithChecksum/d0d7aed9add9567f9b2dc3ac93141bf0/cf/b9d13a9cfc0a463cb4e8ec4ea37e135c and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/local-export-1732255486010/archive/data/default/testtb-testExportWithChecksum/d0d7aed9add9567f9b2dc3ac93141bf0/cf/b9d13a9cfc0a463cb4e8ec4ea37e135c. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.)
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:599)
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:335)
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:257)
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:181)
    at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145)
    at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800)
    at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348)
    at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172)
2024-11-22T06:05:12,028 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732255328437_0008_000001 (auth:SIMPLE) from 127.0.0.1:53862
2024-11-22T06:05:14,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742256_1432 (size=21330)
2024-11-22T06:05:14,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742256_1432 (size=21330)
2024-11-22T06:05:14,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742256_1432 (size=21330)
2024-11-22T06:05:14,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742257_1433 (size=460)
2024-11-22T06:05:14,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742257_1433 (size=460)
2024-11-22T06:05:14,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742257_1433 (size=460)
2024-11-22T06:05:15,013 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1754703613/yarn-5946224054/MiniMRCluster_1754703613-localDir-nm-0_3/usercache/jenkins/appcache/application_1732255328437_0008/container_1732255328437_0008_01_000005/launch_container.sh]
2024-11-22T06:05:15,013 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1754703613/yarn-5946224054/MiniMRCluster_1754703613-localDir-nm-0_3/usercache/jenkins/appcache/application_1732255328437_0008/container_1732255328437_0008_01_000005/container_tokens]
2024-11-22T06:05:15,013 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1754703613/yarn-5946224054/MiniMRCluster_1754703613-localDir-nm-0_3/usercache/jenkins/appcache/application_1732255328437_0008/container_1732255328437_0008_01_000005/sysfs]
2024-11-22T06:05:15,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742258_1434 (size=21330)
2024-11-22T06:05:15,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742258_1434 (size=21330)
2024-11-22T06:05:15,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742258_1434 (size=21330)
2024-11-22T06:05:15,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742259_1435 (size=349428)
2024-11-22T06:05:15,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742259_1435 (size=349428)
2024-11-22T06:05:15,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742259_1435 (size=349428)
2024-11-22T06:05:17,046 ERROR [Time-limited test {}] snapshot.ExportSnapshot(1230): Snapshot export failed
org.apache.hadoop.hbase.snapshot.ExportSnapshotException: Task failed task_1732255328437_0008_m_000000 Job failed as tasks failed. failedMaps:1 failedReduces:0 killedMaps:0 killedReduces: 0
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot.runCopyJob(ExportSnapshot.java:938) ~[classes/:?]
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot.doWork(ExportSnapshot.java:1207) ~[classes/:?]
    at org.apache.hadoop.hbase.util.AbstractHBaseTool.run(AbstractHBaseTool.java:150) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:82) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.runExportSnapshot(TestExportSnapshot.java:522) ~[test-classes/:?]
    at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportFileSystemState(TestExportSnapshot.java:352) ~[test-classes/:?]
    at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportWithChecksum(TestExportSnapshot.java:237) ~[test-classes/:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
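The mismatch reported above is between an hdfs:// source file and a file:/ destination, so the two filesystems' default per-block checksums cannot be compared, which is exactly what the error text describes. As a rough illustration only, not taken from this test run (the driver class name and the destination URI below are placeholders), an export could pass the file-level COMPOSITE_CRC mode mentioned in the error through the job configuration and run the same ExportSnapshot tool via ToolRunner, as the stack trace shows the test itself does:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

// Hypothetical driver class, not part of the test suite.
public class ExportWithCompositeCrc {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // With COMPOSITE_CRC, file-level CRCs are comparable across filesystems and block sizes.
    conf.set("dfs.checksum.combine.mode", "COMPOSITE_CRC");
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportWithChecksum",  // snapshot named in this log
        "-copy-to", "file:///tmp/local-export"          // placeholder destination
        // Alternatively, "-no-checksum-verify" skips the comparison entirely
        // (see the NOTE in the error message about masking corruption).
    });
    System.exit(rc);
  }
}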
2024-11-22T06:05:17,047 INFO [Time-limited test {}] snapshot.TestExportSnapshot(467): HDFS export destination path: hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/export-test/export-1732255517047 2024-11-22T06:05:17,047 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=hdfs://localhost:33847, tgtDir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/export-test/export-1732255517047, rawTgtDir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/export-test/export-1732255517047, srcFsUri=hdfs://localhost:33847, srcDir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788 2024-11-22T06:05:17,078 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:33847, inputRoot=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788 2024-11-22T06:05:17,078 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-2145612548_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/export-test/export-1732255517047, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/export-test/export-1732255517047/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-11-22T06:05:17,080 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 2024-11-22T06:05:17,084 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/snaptb0-testExportWithChecksum to hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/export-test/export-1732255517047/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-11-22T06:05:17,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742260_1436 (size=156) 2024-11-22T06:05:17,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742260_1436 (size=156) 2024-11-22T06:05:17,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742260_1436 (size=156) 2024-11-22T06:05:17,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742261_1437 (size=621) 2024-11-22T06:05:17,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742261_1437 (size=621) 2024-11-22T06:05:17,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742261_1437 (size=621) 2024-11-22T06:05:17,139 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:05:17,139 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:05:17,140 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:05:18,226 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/hadoop-13506251150808790099.jar 2024-11-22T06:05:18,227 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:05:18,227 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:05:18,299 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/hadoop-5161732500606901192.jar 2024-11-22T06:05:18,299 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:05:18,300 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:05:18,300 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:05:18,300 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:05:18,301 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:05:18,301 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:05:18,302 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-22T06:05:18,302 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-22T06:05:18,302 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-22T06:05:18,303 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-22T06:05:18,303 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-22T06:05:18,303 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-22T06:05:18,303 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-22T06:05:18,303 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-22T06:05:18,304 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-22T06:05:18,304 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-22T06:05:18,304 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-22T06:05:18,304 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 
2024-11-22T06:05:18,304 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-22T06:05:18,304 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-22T06:05:18,305 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-22T06:05:18,305 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-22T06:05:18,305 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-22T06:05:18,305 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-22T06:05:18,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742262_1438 (size=440656) 2024-11-22T06:05:18,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742262_1438 (size=440656) 2024-11-22T06:05:18,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742262_1438 (size=440656) 2024-11-22T06:05:18,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742263_1439 (size=131440) 2024-11-22T06:05:18,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742263_1439 (size=131440) 2024-11-22T06:05:18,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742263_1439 (size=131440) 2024-11-22T06:05:18,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742264_1440 (size=4188619) 2024-11-22T06:05:18,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742264_1440 (size=4188619) 2024-11-22T06:05:18,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742264_1440 (size=4188619) 2024-11-22T06:05:18,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:43209 is added to blk_1073742265_1441 (size=1323991) 2024-11-22T06:05:18,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742265_1441 (size=1323991) 2024-11-22T06:05:18,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742265_1441 (size=1323991) 2024-11-22T06:05:18,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742266_1442 (size=903736) 2024-11-22T06:05:18,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742266_1442 (size=903736) 2024-11-22T06:05:18,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742266_1442 (size=903736) 2024-11-22T06:05:18,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742267_1443 (size=8360083) 2024-11-22T06:05:18,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742267_1443 (size=8360083) 2024-11-22T06:05:18,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742267_1443 (size=8360083) 2024-11-22T06:05:18,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742268_1444 (size=1877034) 2024-11-22T06:05:18,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742268_1444 (size=1877034) 2024-11-22T06:05:18,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742268_1444 (size=1877034) 2024-11-22T06:05:18,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742269_1445 (size=77835) 2024-11-22T06:05:18,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742269_1445 (size=77835) 2024-11-22T06:05:18,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742269_1445 (size=77835) 2024-11-22T06:05:18,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742270_1446 (size=30949) 2024-11-22T06:05:18,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742270_1446 (size=30949) 2024-11-22T06:05:18,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742270_1446 (size=30949) 2024-11-22T06:05:18,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742271_1447 (size=1597347) 2024-11-22T06:05:18,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742271_1447 (size=1597347) 2024-11-22T06:05:18,985 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742271_1447 (size=1597347) 2024-11-22T06:05:19,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742272_1448 (size=4695811) 2024-11-22T06:05:19,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742272_1448 (size=4695811) 2024-11-22T06:05:19,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742272_1448 (size=4695811) 2024-11-22T06:05:19,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742273_1449 (size=232957) 2024-11-22T06:05:19,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742273_1449 (size=232957) 2024-11-22T06:05:19,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742273_1449 (size=232957) 2024-11-22T06:05:19,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742274_1450 (size=127628) 2024-11-22T06:05:19,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742274_1450 (size=127628) 2024-11-22T06:05:19,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742274_1450 (size=127628) 2024-11-22T06:05:19,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742275_1451 (size=20406) 2024-11-22T06:05:19,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742275_1451 (size=20406) 2024-11-22T06:05:19,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742275_1451 (size=20406) 2024-11-22T06:05:19,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742276_1452 (size=5175431) 2024-11-22T06:05:19,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742276_1452 (size=5175431) 2024-11-22T06:05:19,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742276_1452 (size=5175431) 2024-11-22T06:05:19,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742277_1453 (size=217634) 2024-11-22T06:05:19,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742277_1453 (size=217634) 2024-11-22T06:05:19,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742277_1453 (size=217634) 2024-11-22T06:05:19,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742278_1454 (size=1832290) 2024-11-22T06:05:19,249 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742278_1454 (size=1832290) 2024-11-22T06:05:19,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742278_1454 (size=1832290) 2024-11-22T06:05:19,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742279_1455 (size=322274) 2024-11-22T06:05:19,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742279_1455 (size=322274) 2024-11-22T06:05:19,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742279_1455 (size=322274) 2024-11-22T06:05:19,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742280_1456 (size=503880) 2024-11-22T06:05:19,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742280_1456 (size=503880) 2024-11-22T06:05:19,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742280_1456 (size=503880) 2024-11-22T06:05:19,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742281_1457 (size=6424744) 2024-11-22T06:05:19,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742281_1457 (size=6424744) 2024-11-22T06:05:19,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742281_1457 (size=6424744) 2024-11-22T06:05:19,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742282_1458 (size=29229) 2024-11-22T06:05:19,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742282_1458 (size=29229) 2024-11-22T06:05:19,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742282_1458 (size=29229) 2024-11-22T06:05:19,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742283_1459 (size=24096) 2024-11-22T06:05:19,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742283_1459 (size=24096) 2024-11-22T06:05:19,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742283_1459 (size=24096) 2024-11-22T06:05:19,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742284_1460 (size=111872) 2024-11-22T06:05:19,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742284_1460 (size=111872) 2024-11-22T06:05:19,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742284_1460 (size=111872) 2024-11-22T06:05:19,359 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742285_1461 (size=45609) 2024-11-22T06:05:19,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742285_1461 (size=45609) 2024-11-22T06:05:19,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742285_1461 (size=45609) 2024-11-22T06:05:19,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742286_1462 (size=136454) 2024-11-22T06:05:19,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742286_1462 (size=136454) 2024-11-22T06:05:19,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742286_1462 (size=136454) 2024-11-22T06:05:19,392 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-11-22T06:05:19,394 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'snaptb0-testExportWithChecksum' hfile list 2024-11-22T06:05:19,397 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(754): export split=0 size=13.3 K 2024-11-22T06:05:19,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742287_1463 (size=338) 2024-11-22T06:05:19,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742287_1463 (size=338) 2024-11-22T06:05:19,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742287_1463 (size=338) 2024-11-22T06:05:19,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742288_1464 (size=15) 2024-11-22T06:05:19,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742288_1464 (size=15) 2024-11-22T06:05:19,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742288_1464 (size=15) 2024-11-22T06:05:19,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742289_1465 (size=303730) 2024-11-22T06:05:19,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742289_1465 (size=303730) 2024-11-22T06:05:19,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742289_1465 (size=303730) 2024-11-22T06:05:21,122 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-22T06:05:21,122 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-22T06:05:21,126 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732255328437_0008_000001 (auth:SIMPLE) from 127.0.0.1:53868 2024-11-22T06:05:21,133 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1754703613/yarn-5946224054/MiniMRCluster_1754703613-localDir-nm-0_3/usercache/jenkins/appcache/application_1732255328437_0008/container_1732255328437_0008_01_000001/launch_container.sh] 2024-11-22T06:05:21,133 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1754703613/yarn-5946224054/MiniMRCluster_1754703613-localDir-nm-0_3/usercache/jenkins/appcache/application_1732255328437_0008/container_1732255328437_0008_01_000001/container_tokens] 2024-11-22T06:05:21,133 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1754703613/yarn-5946224054/MiniMRCluster_1754703613-localDir-nm-0_3/usercache/jenkins/appcache/application_1732255328437_0008/container_1732255328437_0008_01_000001/sysfs] 2024-11-22T06:05:21,929 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732255328437_0009_000001 (auth:SIMPLE) from 127.0.0.1:59186 2024-11-22T06:05:27,563 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732255328437_0009_000001 (auth:SIMPLE) from 127.0.0.1:42114 2024-11-22T06:05:27,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742290_1466 (size=349380) 2024-11-22T06:05:27,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742290_1466 (size=349380) 2024-11-22T06:05:27,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742290_1466 (size=349380) 2024-11-22T06:05:29,722 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-22T06:05:29,783 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732255328437_0009_000001 (auth:SIMPLE) from 127.0.0.1:59188 2024-11-22T06:05:30,030 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region d0d7aed9add9567f9b2dc3ac93141bf0, had cached 0 bytes from a total of 8392 2024-11-22T06:05:30,034 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region ad39bbcb25203ccdc96629b51aad1e82, had cached 0 bytes from a total of 5216 2024-11-22T06:05:32,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742291_1467 (size=8392) 2024-11-22T06:05:32,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742291_1467 (size=8392) 2024-11-22T06:05:32,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742291_1467 (size=8392) 2024-11-22T06:05:32,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742292_1468 (size=5216) 2024-11-22T06:05:32,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742292_1468 (size=5216) 2024-11-22T06:05:32,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742292_1468 (size=5216) 2024-11-22T06:05:32,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742293_1469 (size=17413) 2024-11-22T06:05:32,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742293_1469 (size=17413) 2024-11-22T06:05:32,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742293_1469 (size=17413) 2024-11-22T06:05:32,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742294_1470 (size=462) 2024-11-22T06:05:32,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742294_1470 (size=462) 2024-11-22T06:05:32,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742294_1470 (size=462) 2024-11-22T06:05:32,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742295_1471 (size=17413) 2024-11-22T06:05:32,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742295_1471 (size=17413) 2024-11-22T06:05:32,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742295_1471 (size=17413) 2024-11-22T06:05:33,016 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1754703613/yarn-5946224054/MiniMRCluster_1754703613-localDir-nm-1_0/usercache/jenkins/appcache/application_1732255328437_0009/container_1732255328437_0009_01_000002/launch_container.sh] 2024-11-22T06:05:33,016 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1754703613/yarn-5946224054/MiniMRCluster_1754703613-localDir-nm-1_0/usercache/jenkins/appcache/application_1732255328437_0009/container_1732255328437_0009_01_000002/container_tokens] 2024-11-22T06:05:33,016 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1754703613/yarn-5946224054/MiniMRCluster_1754703613-localDir-nm-1_0/usercache/jenkins/appcache/application_1732255328437_0009/container_1732255328437_0009_01_000002/sysfs] 2024-11-22T06:05:33,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742296_1472 (size=349380) 2024-11-22T06:05:33,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742296_1472 (size=349380) 2024-11-22T06:05:33,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742296_1472 (size=349380) 2024-11-22T06:05:33,435 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732255328437_0009_000001 (auth:SIMPLE) from 127.0.0.1:57730 2024-11-22T06:05:34,623 INFO [Time-limited test {}] snapshot.ExportSnapshot(1210): Finalize the Snapshot Export 2024-11-22T06:05:34,623 INFO [Time-limited test {}] snapshot.ExportSnapshot(1221): Verify the exported snapshot's expiration status and integrity. 
2024-11-22T06:05:34,635 INFO [Time-limited test {}] snapshot.ExportSnapshot(1227): Export Completed: snaptb0-testExportWithChecksum 2024-11-22T06:05:34,635 INFO [Time-limited test {}] snapshot.TestExportSnapshot(361): Exported snapshot 2024-11-22T06:05:34,636 INFO [Time-limited test {}] snapshot.TestExportSnapshot(372): Verified filesystem state 2024-11-22T06:05:34,636 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-2145612548_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/snaptb0-testExportWithChecksum at hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-11-22T06:05:34,636 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/snaptb0-testExportWithChecksum/.snapshotinfo 2024-11-22T06:05:34,636 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/snaptb0-testExportWithChecksum/data.manifest 2024-11-22T06:05:34,636 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-2145612548_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/export-test/export-1732255517047/.hbase-snapshot/snaptb0-testExportWithChecksum at hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/export-test/export-1732255517047/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-11-22T06:05:34,637 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/export-test/export-1732255517047/.hbase-snapshot/snaptb0-testExportWithChecksum/.snapshotinfo 2024-11-22T06:05:34,637 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/export-test/export-1732255517047/.hbase-snapshot/snaptb0-testExportWithChecksum/data.manifest 2024-11-22T06:05:34,651 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportWithChecksum 2024-11-22T06:05:34,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] procedure2.ProcedureExecutor(1139): Stored pid=186, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithChecksum 2024-11-22T06:05:34,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=186 2024-11-22T06:05:34,657 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732255534656"}]},"ts":"1732255534656"} 2024-11-22T06:05:34,659 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=DISABLING in hbase:meta 2024-11-22T06:05:34,659 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithChecksum to state=DISABLING 2024-11-22T06:05:34,660 INFO [PEWorker-5 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=187, ppid=186, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithChecksum}] 2024-11-22T06:05:34,662 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=188, ppid=187, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=ad39bbcb25203ccdc96629b51aad1e82, UNASSIGN}, {pid=189, ppid=187, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=d0d7aed9add9567f9b2dc3ac93141bf0, UNASSIGN}] 2024-11-22T06:05:34,664 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=189, ppid=187, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=d0d7aed9add9567f9b2dc3ac93141bf0, UNASSIGN 2024-11-22T06:05:34,664 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=188, ppid=187, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=ad39bbcb25203ccdc96629b51aad1e82, UNASSIGN 2024-11-22T06:05:34,665 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=188 updating hbase:meta row=ad39bbcb25203ccdc96629b51aad1e82, regionState=CLOSING, regionLocation=407411ea95db,41799,1732255321718 2024-11-22T06:05:34,665 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=189 updating hbase:meta row=d0d7aed9add9567f9b2dc3ac93141bf0, regionState=CLOSING, regionLocation=407411ea95db,44619,1732255321573 2024-11-22T06:05:34,668 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=188, ppid=187, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=ad39bbcb25203ccdc96629b51aad1e82, UNASSIGN because future has completed 2024-11-22T06:05:34,669 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-22T06:05:34,669 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=190, ppid=188, state=RUNNABLE, hasLock=false; CloseRegionProcedure ad39bbcb25203ccdc96629b51aad1e82, server=407411ea95db,41799,1732255321718}] 2024-11-22T06:05:34,671 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=189, ppid=187, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=d0d7aed9add9567f9b2dc3ac93141bf0, UNASSIGN because future has completed 2024-11-22T06:05:34,672 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-22T06:05:34,672 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=191, ppid=189, state=RUNNABLE, hasLock=false; CloseRegionProcedure d0d7aed9add9567f9b2dc3ac93141bf0, server=407411ea95db,44619,1732255321573}] 2024-11-22T06:05:34,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=186 2024-11-22T06:05:34,824 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 
{event_type=M_RS_CLOSE_REGION, pid=190}] handler.UnassignRegionHandler(122): Close ad39bbcb25203ccdc96629b51aad1e82 2024-11-22T06:05:34,824 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-22T06:05:34,824 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] regionserver.HRegion(1722): Closing ad39bbcb25203ccdc96629b51aad1e82, disabling compactions & flushes 2024-11-22T06:05:34,824 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,,1732255484642.ad39bbcb25203ccdc96629b51aad1e82. 2024-11-22T06:05:34,824 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,,1732255484642.ad39bbcb25203ccdc96629b51aad1e82. 2024-11-22T06:05:34,824 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,,1732255484642.ad39bbcb25203ccdc96629b51aad1e82. after waiting 0 ms 2024-11-22T06:05:34,824 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,,1732255484642.ad39bbcb25203ccdc96629b51aad1e82. 2024-11-22T06:05:34,827 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] handler.UnassignRegionHandler(122): Close d0d7aed9add9567f9b2dc3ac93141bf0 2024-11-22T06:05:34,827 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-22T06:05:34,827 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] regionserver.HRegion(1722): Closing d0d7aed9add9567f9b2dc3ac93141bf0, disabling compactions & flushes 2024-11-22T06:05:34,827 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,1,1732255484642.d0d7aed9add9567f9b2dc3ac93141bf0. 2024-11-22T06:05:34,827 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,1,1732255484642.d0d7aed9add9567f9b2dc3ac93141bf0. 2024-11-22T06:05:34,827 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,1,1732255484642.d0d7aed9add9567f9b2dc3ac93141bf0. after waiting 0 ms 2024-11-22T06:05:34,827 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,1,1732255484642.d0d7aed9add9567f9b2dc3ac93141bf0. 
2024-11-22T06:05:34,829 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithChecksum/ad39bbcb25203ccdc96629b51aad1e82/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-22T06:05:34,830 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-22T06:05:34,830 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,,1732255484642.ad39bbcb25203ccdc96629b51aad1e82. 2024-11-22T06:05:34,830 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] regionserver.HRegion(1676): Region close journal for ad39bbcb25203ccdc96629b51aad1e82: Waiting for close lock at 1732255534824Running coprocessor pre-close hooks at 1732255534824Disabling compacts and flushes for region at 1732255534824Disabling writes for close at 1732255534824Writing region close event to WAL at 1732255534825 (+1 ms)Running coprocessor post-close hooks at 1732255534830 (+5 ms)Closed at 1732255534830 2024-11-22T06:05:34,833 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] handler.UnassignRegionHandler(157): Closed ad39bbcb25203ccdc96629b51aad1e82 2024-11-22T06:05:34,833 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=188 updating hbase:meta row=ad39bbcb25203ccdc96629b51aad1e82, regionState=CLOSED 2024-11-22T06:05:34,834 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithChecksum/d0d7aed9add9567f9b2dc3ac93141bf0/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-22T06:05:34,835 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-22T06:05:34,835 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,1,1732255484642.d0d7aed9add9567f9b2dc3ac93141bf0. 
2024-11-22T06:05:34,835 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] regionserver.HRegion(1676): Region close journal for d0d7aed9add9567f9b2dc3ac93141bf0: Waiting for close lock at 1732255534827Running coprocessor pre-close hooks at 1732255534827Disabling compacts and flushes for region at 1732255534827Disabling writes for close at 1732255534827Writing region close event to WAL at 1732255534828 (+1 ms)Running coprocessor post-close hooks at 1732255534835 (+7 ms)Closed at 1732255534835 2024-11-22T06:05:34,837 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=190, ppid=188, state=RUNNABLE, hasLock=false; CloseRegionProcedure ad39bbcb25203ccdc96629b51aad1e82, server=407411ea95db,41799,1732255321718 because future has completed 2024-11-22T06:05:34,839 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] handler.UnassignRegionHandler(157): Closed d0d7aed9add9567f9b2dc3ac93141bf0 2024-11-22T06:05:34,839 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=189 updating hbase:meta row=d0d7aed9add9567f9b2dc3ac93141bf0, regionState=CLOSED 2024-11-22T06:05:34,842 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=191, ppid=189, state=RUNNABLE, hasLock=false; CloseRegionProcedure d0d7aed9add9567f9b2dc3ac93141bf0, server=407411ea95db,44619,1732255321573 because future has completed 2024-11-22T06:05:34,845 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=190, resume processing ppid=188 2024-11-22T06:05:34,848 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=190, ppid=188, state=SUCCESS, hasLock=false; CloseRegionProcedure ad39bbcb25203ccdc96629b51aad1e82, server=407411ea95db,41799,1732255321718 in 170 msec 2024-11-22T06:05:34,848 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=188, ppid=187, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=ad39bbcb25203ccdc96629b51aad1e82, UNASSIGN in 183 msec 2024-11-22T06:05:34,848 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=191, resume processing ppid=189 2024-11-22T06:05:34,848 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=191, ppid=189, state=SUCCESS, hasLock=false; CloseRegionProcedure d0d7aed9add9567f9b2dc3ac93141bf0, server=407411ea95db,44619,1732255321573 in 172 msec 2024-11-22T06:05:34,851 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=189, resume processing ppid=187 2024-11-22T06:05:34,851 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=189, ppid=187, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=d0d7aed9add9567f9b2dc3ac93141bf0, UNASSIGN in 186 msec 2024-11-22T06:05:34,854 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=187, resume processing ppid=186 2024-11-22T06:05:34,854 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=187, ppid=186, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithChecksum in 192 msec 2024-11-22T06:05:34,858 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put 
{"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732255534857"}]},"ts":"1732255534857"} 2024-11-22T06:05:34,860 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=DISABLED in hbase:meta 2024-11-22T06:05:34,860 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithChecksum to state=DISABLED 2024-11-22T06:05:34,864 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=186, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithChecksum in 211 msec 2024-11-22T06:05:34,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=186 2024-11-22T06:05:34,970 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithChecksum completed 2024-11-22T06:05:34,971 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportWithChecksum 2024-11-22T06:05:34,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] procedure2.ProcedureExecutor(1139): Stored pid=192, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-22T06:05:34,972 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=192, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-22T06:05:34,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithChecksum 2024-11-22T06:05:34,974 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=192, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-22T06:05:34,979 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41799 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithChecksum 2024-11-22T06:05:34,982 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithChecksum/d0d7aed9add9567f9b2dc3ac93141bf0 2024-11-22T06:05:34,982 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithChecksum/ad39bbcb25203ccdc96629b51aad1e82 2024-11-22T06:05:34,985 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithChecksum/d0d7aed9add9567f9b2dc3ac93141bf0/cf, FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithChecksum/d0d7aed9add9567f9b2dc3ac93141bf0/recovered.edits] 2024-11-22T06:05:34,985 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithChecksum/ad39bbcb25203ccdc96629b51aad1e82/cf, 
FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithChecksum/ad39bbcb25203ccdc96629b51aad1e82/recovered.edits] 2024-11-22T06:05:34,995 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithChecksum/ad39bbcb25203ccdc96629b51aad1e82/cf/5d14d49f369e470db0fdbec8ae0ee5d3 to hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/archive/data/default/testtb-testExportWithChecksum/ad39bbcb25203ccdc96629b51aad1e82/cf/5d14d49f369e470db0fdbec8ae0ee5d3 2024-11-22T06:05:34,995 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithChecksum/d0d7aed9add9567f9b2dc3ac93141bf0/cf/b9d13a9cfc0a463cb4e8ec4ea37e135c to hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/archive/data/default/testtb-testExportWithChecksum/d0d7aed9add9567f9b2dc3ac93141bf0/cf/b9d13a9cfc0a463cb4e8ec4ea37e135c 2024-11-22T06:05:34,999 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithChecksum/ad39bbcb25203ccdc96629b51aad1e82/recovered.edits/9.seqid to hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/archive/data/default/testtb-testExportWithChecksum/ad39bbcb25203ccdc96629b51aad1e82/recovered.edits/9.seqid 2024-11-22T06:05:35,000 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithChecksum/ad39bbcb25203ccdc96629b51aad1e82 2024-11-22T06:05:35,000 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithChecksum/d0d7aed9add9567f9b2dc3ac93141bf0/recovered.edits/9.seqid to hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/archive/data/default/testtb-testExportWithChecksum/d0d7aed9add9567f9b2dc3ac93141bf0/recovered.edits/9.seqid 2024-11-22T06:05:35,000 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportWithChecksum/d0d7aed9add9567f9b2dc3ac93141bf0 2024-11-22T06:05:35,001 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithChecksum regions 2024-11-22T06:05:35,005 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=192, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-22T06:05:35,008 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportWithChecksum from hbase:meta 2024-11-22T06:05:35,038 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35283-0x101626c19a90003, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-22T06:05:35,038 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-22T06:05:35,038 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44619-0x101626c19a90001, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-22T06:05:35,039 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41799-0x101626c19a90002, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-22T06:05:35,039 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-11-22T06:05:35,040 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-11-22T06:05:35,040 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-11-22T06:05:35,041 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-11-22T06:05:35,042 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportWithChecksum' descriptor. 2024-11-22T06:05:35,044 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=192, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-22T06:05:35,044 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportWithChecksum' from region states. 2024-11-22T06:05:35,044 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum,,1732255484642.ad39bbcb25203ccdc96629b51aad1e82.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732255535044"}]},"ts":"9223372036854775807"} 2024-11-22T06:05:35,044 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum,1,1732255484642.d0d7aed9add9567f9b2dc3ac93141bf0.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732255535044"}]},"ts":"9223372036854775807"} 2024-11-22T06:05:35,047 INFO [PEWorker-2 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-22T06:05:35,047 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => ad39bbcb25203ccdc96629b51aad1e82, NAME => 'testtb-testExportWithChecksum,,1732255484642.ad39bbcb25203ccdc96629b51aad1e82.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => d0d7aed9add9567f9b2dc3ac93141bf0, NAME => 'testtb-testExportWithChecksum,1,1732255484642.d0d7aed9add9567f9b2dc3ac93141bf0.', STARTKEY => '1', ENDKEY => ''}] 2024-11-22T06:05:35,048 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportWithChecksum' as deleted. 
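The table teardown running here (DisableTableProcedure pid=186 completed above, DeleteTableProcedure pid=192 now clearing the filesystem layout, ACLs and hbase:meta) is driven from the test client. A hedged sketch of the equivalent blocking calls through the public Admin API, assuming a client Configuration that points at this cluster (class name mine):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTestTable {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();   // assumes hbase-site.xml for the cluster on the classpath
        TableName tn = TableName.valueOf("default", "testtb-testExportWithChecksum");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            if (admin.tableExists(tn)) {
                admin.disableTable(tn);   // master side: DisableTableProcedure, as in pid=186 above
                admin.deleteTable(tn);    // master side: DeleteTableProcedure, as in pid=192 above
            }
        }
    }
}

Each call blocks until the corresponding master procedure reports SUCCESS, which is what the repeated "Checking to see if procedure is done" polls in the log correspond to.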
2024-11-22T06:05:35,048 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732255535048"}]},"ts":"9223372036854775807"} 2024-11-22T06:05:35,050 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41799-0x101626c19a90002, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-22T06:05:35,050 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41799-0x101626c19a90002, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:05:35,050 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44619-0x101626c19a90001, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-22T06:05:35,050 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44619-0x101626c19a90001, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:05:35,051 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35283-0x101626c19a90003, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-22T06:05:35,051 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35283-0x101626c19a90003, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:05:35,051 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-22T06:05:35,051 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:05:35,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=192 2024-11-22T06:05:35,053 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-22T06:05:35,054 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-22T06:05:35,054 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-22T06:05:35,055 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from 
testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-22T06:05:35,058 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithChecksum state from META 2024-11-22T06:05:35,059 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=192, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-22T06:05:35,066 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=192, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithChecksum in 88 msec 2024-11-22T06:05:35,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=192 2024-11-22T06:05:35,159 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithChecksum 2024-11-22T06:05:35,159 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithChecksum completed 2024-11-22T06:05:35,166 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithChecksum" type: DISABLED 2024-11-22T06:05:35,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithChecksum 2024-11-22T06:05:35,169 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithChecksum" type: DISABLED 2024-11-22T06:05:35,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithChecksum 2024-11-22T06:05:35,190 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportWithChecksum Thread=807 (was 808), OpenFileDescriptor=807 (was 813), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=647 (was 539) - SystemLoadAverage LEAK? 
-, ProcessCount=17 (was 17), AvailableMemoryMB=4458 (was 4911) 2024-11-22T06:05:35,190 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=807 is superior to 500 2024-11-22T06:05:35,206 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithSkipTmp Thread=807, OpenFileDescriptor=807, MaxFileDescriptor=1048576, SystemLoadAverage=647, ProcessCount=17, AvailableMemoryMB=4457 2024-11-22T06:05:35,206 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=807 is superior to 500 2024-11-22T06:05:35,208 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T06:05:35,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] procedure2.ProcedureExecutor(1139): Stored pid=193, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-22T06:05:35,209 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=193, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_PRE_OPERATION 2024-11-22T06:05:35,210 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T06:05:35,210 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithSkipTmp" procId is: 193 2024-11-22T06:05:35,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=193 2024-11-22T06:05:35,210 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=193, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-22T06:05:35,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742297_1473 (size=418) 2024-11-22T06:05:35,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742297_1473 (size=418) 2024-11-22T06:05:35,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742297_1473 (size=418) 2024-11-22T06:05:35,229 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 70cc174be652d64ec07f1005eaa473af, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1732255535207.70cc174be652d64ec07f1005eaa473af.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => 
{'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788 2024-11-22T06:05:35,229 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 11b79421a8f4d2dc2c325ae22e6ed4f0, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1732255535207.11b79421a8f4d2dc2c325ae22e6ed4f0.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788 2024-11-22T06:05:35,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742298_1474 (size=79) 2024-11-22T06:05:35,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742298_1474 (size=79) 2024-11-22T06:05:35,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742298_1474 (size=79) 2024-11-22T06:05:35,244 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,,1732255535207.70cc174be652d64ec07f1005eaa473af.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T06:05:35,244 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1722): Closing 70cc174be652d64ec07f1005eaa473af, disabling compactions & flushes 2024-11-22T06:05:35,244 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,,1732255535207.70cc174be652d64ec07f1005eaa473af. 2024-11-22T06:05:35,244 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,,1732255535207.70cc174be652d64ec07f1005eaa473af. 2024-11-22T06:05:35,244 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,,1732255535207.70cc174be652d64ec07f1005eaa473af. after waiting 0 ms 2024-11-22T06:05:35,244 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,,1732255535207.70cc174be652d64ec07f1005eaa473af. 
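The create request logged at 06:05:35,208 carries the full table descriptor (a single cf family, VERSIONS => '1', 64 KB blocks, REGION_REPLICATION => '1') plus one split point, which is why exactly two regions ('' to '1' and '1' to '') are being initialized here. A hedged sketch of building an equivalent descriptor with the public client builders; the class name is mine, and the split key '1' is inferred from the region boundaries in the log:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateSkipTmpTable {
    public static void main(String[] args) throws Exception {
        TableName tn = TableName.valueOf("default", "testtb-testExportFileSystemStateWithSkipTmp");
        TableDescriptor desc = TableDescriptorBuilder.newBuilder(tn)
            .setRegionReplication(1)                                   // REGION_REPLICATION => '1'
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                .setMaxVersions(1)                                     // VERSIONS => '1'
                .setBlocksize(64 * 1024)                               // BLOCKSIZE => '65536'
                .build())
            .build();
        byte[][] splitKeys = { Bytes.toBytes("1") };                   // regions ''->'1' and '1'->''
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            admin.createTable(desc, splitKeys);                        // master side: CreateTableProcedure (pid=193)
        }
    }
}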
2024-11-22T06:05:35,244 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,,1732255535207.70cc174be652d64ec07f1005eaa473af. 2024-11-22T06:05:35,244 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1676): Region close journal for 70cc174be652d64ec07f1005eaa473af: Waiting for close lock at 1732255535244Disabling compacts and flushes for region at 1732255535244Disabling writes for close at 1732255535244Writing region close event to WAL at 1732255535244Closed at 1732255535244 2024-11-22T06:05:35,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742299_1475 (size=79) 2024-11-22T06:05:35,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742299_1475 (size=79) 2024-11-22T06:05:35,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742299_1475 (size=79) 2024-11-22T06:05:35,256 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,1,1732255535207.11b79421a8f4d2dc2c325ae22e6ed4f0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T06:05:35,256 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1722): Closing 11b79421a8f4d2dc2c325ae22e6ed4f0, disabling compactions & flushes 2024-11-22T06:05:35,256 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,1,1732255535207.11b79421a8f4d2dc2c325ae22e6ed4f0. 2024-11-22T06:05:35,256 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1732255535207.11b79421a8f4d2dc2c325ae22e6ed4f0. 2024-11-22T06:05:35,256 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1732255535207.11b79421a8f4d2dc2c325ae22e6ed4f0. after waiting 0 ms 2024-11-22T06:05:35,256 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,1,1732255535207.11b79421a8f4d2dc2c325ae22e6ed4f0. 2024-11-22T06:05:35,256 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,1,1732255535207.11b79421a8f4d2dc2c325ae22e6ed4f0. 
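The next entries add one info:regioninfo / info:state row per region to hbase:meta and then flip the table state to ENABLING. Once those puts land, the same information can be read back from the client side with an ordinary scan of hbase:meta; a hedged sketch of such a scan (a plain row-prefix scan printing the raw info:state value, not the internal MetaTableAccessor):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ScanMetaForTable {
    public static void main(String[] args) throws Exception {
        // Meta row keys look like "<table>,<startkey>,<timestamp>.<encodedRegionName>."
        byte[] prefix = Bytes.toBytes("testtb-testExportFileSystemStateWithSkipTmp,");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table meta = conn.getTable(TableName.META_TABLE_NAME);
             ResultScanner rs = meta.getScanner(new Scan().setRowPrefixFilter(prefix))) {
            for (Result r : rs) {
                byte[] state = r.getValue(Bytes.toBytes("info"), Bytes.toBytes("state"));
                System.out.println(Bytes.toString(r.getRow()) + " -> "
                    + (state == null ? "?" : Bytes.toString(state)));
            }
        }
    }
}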
2024-11-22T06:05:35,256 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1676): Region close journal for 11b79421a8f4d2dc2c325ae22e6ed4f0: Waiting for close lock at 1732255535256Disabling compacts and flushes for region at 1732255535256Disabling writes for close at 1732255535256Writing region close event to WAL at 1732255535256Closed at 1732255535256 2024-11-22T06:05:35,257 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=193, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_ADD_TO_META 2024-11-22T06:05:35,257 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSkipTmp,,1732255535207.70cc174be652d64ec07f1005eaa473af.","families":{"info":[{"qualifier":"regioninfo","vlen":78,"tag":[],"timestamp":"1732255535257"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732255535257"}]},"ts":"1732255535257"} 2024-11-22T06:05:35,257 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSkipTmp,1,1732255535207.11b79421a8f4d2dc2c325ae22e6ed4f0.","families":{"info":[{"qualifier":"regioninfo","vlen":78,"tag":[],"timestamp":"1732255535257"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732255535257"}]},"ts":"1732255535257"} 2024-11-22T06:05:35,260 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-22T06:05:35,261 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=193, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-22T06:05:35,261 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732255535261"}]},"ts":"1732255535261"} 2024-11-22T06:05:35,262 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=ENABLING in hbase:meta 2024-11-22T06:05:35,263 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {407411ea95db=0} racks are {/default-rack=0} 2024-11-22T06:05:35,264 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-22T06:05:35,264 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-22T06:05:35,264 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-22T06:05:35,264 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-22T06:05:35,264 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-22T06:05:35,264 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-22T06:05:35,264 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-22T06:05:35,264 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-22T06:05:35,264 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-22T06:05:35,264 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-22T06:05:35,264 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=194, 
ppid=193, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=70cc174be652d64ec07f1005eaa473af, ASSIGN}, {pid=195, ppid=193, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=11b79421a8f4d2dc2c325ae22e6ed4f0, ASSIGN}] 2024-11-22T06:05:35,265 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=195, ppid=193, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=11b79421a8f4d2dc2c325ae22e6ed4f0, ASSIGN 2024-11-22T06:05:35,265 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=194, ppid=193, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=70cc174be652d64ec07f1005eaa473af, ASSIGN 2024-11-22T06:05:35,266 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=195, ppid=193, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=11b79421a8f4d2dc2c325ae22e6ed4f0, ASSIGN; state=OFFLINE, location=407411ea95db,35283,1732255321784; forceNewPlan=false, retain=false 2024-11-22T06:05:35,266 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=194, ppid=193, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=70cc174be652d64ec07f1005eaa473af, ASSIGN; state=OFFLINE, location=407411ea95db,44619,1732255321573; forceNewPlan=false, retain=false 2024-11-22T06:05:35,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=193 2024-11-22T06:05:35,417 INFO [407411ea95db:40429 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
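With the balancer's plan in hand, both regions now move to OPENING on their target servers; further down, the test waits up to 60 s for the assignment to settle ("Waiting until all regions of table ... get assigned"). A hedged, much simplified version of such a wait using the public RegionLocator rather than the HBaseTestingUtil helper:

import java.util.List;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class WaitForAssignment {
    public static void main(String[] args) throws Exception {
        TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
        long deadline = System.currentTimeMillis() + 60_000;          // 60 s, matching the log
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             RegionLocator locator = conn.getRegionLocator(tn)) {
            while (System.currentTimeMillis() < deadline) {
                List<HRegionLocation> locs = locator.getAllRegionLocations();
                if (!locs.isEmpty() && locs.stream().allMatch(l -> l.getServerName() != null)) {
                    locs.forEach(l -> System.out.println(
                        l.getRegion().getEncodedName() + " on " + l.getServerName()));
                    return;                                            // every region has a hosting server
                }
                Thread.sleep(200);                                     // crude poll; the test utility also checks AM state
            }
            throw new IllegalStateException("regions not assigned within timeout");
        }
    }
}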
2024-11-22T06:05:35,418 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=195 updating hbase:meta row=11b79421a8f4d2dc2c325ae22e6ed4f0, regionState=OPENING, regionLocation=407411ea95db,35283,1732255321784 2024-11-22T06:05:35,418 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=194 updating hbase:meta row=70cc174be652d64ec07f1005eaa473af, regionState=OPENING, regionLocation=407411ea95db,44619,1732255321573 2024-11-22T06:05:35,421 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=194, ppid=193, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=70cc174be652d64ec07f1005eaa473af, ASSIGN because future has completed 2024-11-22T06:05:35,422 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=195, ppid=193, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=11b79421a8f4d2dc2c325ae22e6ed4f0, ASSIGN because future has completed 2024-11-22T06:05:35,422 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=196, ppid=195, state=RUNNABLE, hasLock=false; OpenRegionProcedure 11b79421a8f4d2dc2c325ae22e6ed4f0, server=407411ea95db,35283,1732255321784}] 2024-11-22T06:05:35,424 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=197, ppid=194, state=RUNNABLE, hasLock=false; OpenRegionProcedure 70cc174be652d64ec07f1005eaa473af, server=407411ea95db,44619,1732255321573}] 2024-11-22T06:05:35,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=193 2024-11-22T06:05:35,578 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSkipTmp,1,1732255535207.11b79421a8f4d2dc2c325ae22e6ed4f0. 2024-11-22T06:05:35,579 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSkipTmp,,1732255535207.70cc174be652d64ec07f1005eaa473af. 2024-11-22T06:05:35,579 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(7752): Opening region: {ENCODED => 11b79421a8f4d2dc2c325ae22e6ed4f0, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1732255535207.11b79421a8f4d2dc2c325ae22e6ed4f0.', STARTKEY => '1', ENDKEY => ''} 2024-11-22T06:05:35,579 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(7752): Opening region: {ENCODED => 70cc174be652d64ec07f1005eaa473af, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1732255535207.70cc174be652d64ec07f1005eaa473af.', STARTKEY => '', ENDKEY => '1'} 2024-11-22T06:05:35,579 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSkipTmp,,1732255535207.70cc174be652d64ec07f1005eaa473af. 
service=AccessControlService 2024-11-22T06:05:35,579 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSkipTmp,1,1732255535207.11b79421a8f4d2dc2c325ae22e6ed4f0. service=AccessControlService 2024-11-22T06:05:35,579 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-22T06:05:35,579 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-22T06:05:35,579 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSkipTmp 11b79421a8f4d2dc2c325ae22e6ed4f0 2024-11-22T06:05:35,579 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSkipTmp 70cc174be652d64ec07f1005eaa473af 2024-11-22T06:05:35,579 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,,1732255535207.70cc174be652d64ec07f1005eaa473af.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T06:05:35,579 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,1,1732255535207.11b79421a8f4d2dc2c325ae22e6ed4f0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T06:05:35,580 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(7794): checking encryption for 70cc174be652d64ec07f1005eaa473af 2024-11-22T06:05:35,580 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(7794): checking encryption for 11b79421a8f4d2dc2c325ae22e6ed4f0 2024-11-22T06:05:35,580 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(7797): checking classloading for 70cc174be652d64ec07f1005eaa473af 2024-11-22T06:05:35,580 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(7797): checking classloading for 11b79421a8f4d2dc2c325ae22e6ed4f0 2024-11-22T06:05:35,581 INFO [StoreOpener-11b79421a8f4d2dc2c325ae22e6ed4f0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 11b79421a8f4d2dc2c325ae22e6ed4f0 2024-11-22T06:05:35,581 INFO [StoreOpener-70cc174be652d64ec07f1005eaa473af-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, 
cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 70cc174be652d64ec07f1005eaa473af 2024-11-22T06:05:35,582 INFO [StoreOpener-70cc174be652d64ec07f1005eaa473af-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 70cc174be652d64ec07f1005eaa473af columnFamilyName cf 2024-11-22T06:05:35,582 INFO [StoreOpener-11b79421a8f4d2dc2c325ae22e6ed4f0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 11b79421a8f4d2dc2c325ae22e6ed4f0 columnFamilyName cf 2024-11-22T06:05:35,582 DEBUG [StoreOpener-11b79421a8f4d2dc2c325ae22e6ed4f0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T06:05:35,582 DEBUG [StoreOpener-70cc174be652d64ec07f1005eaa473af-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T06:05:35,583 INFO [StoreOpener-70cc174be652d64ec07f1005eaa473af-1 {}] regionserver.HStore(327): Store=70cc174be652d64ec07f1005eaa473af/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T06:05:35,583 INFO [StoreOpener-11b79421a8f4d2dc2c325ae22e6ed4f0-1 {}] regionserver.HStore(327): Store=11b79421a8f4d2dc2c325ae22e6ed4f0/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T06:05:35,583 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(1038): replaying wal for 70cc174be652d64ec07f1005eaa473af 2024-11-22T06:05:35,583 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(1038): replaying wal for 11b79421a8f4d2dc2c325ae22e6ed4f0 2024-11-22T06:05:35,584 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithSkipTmp/70cc174be652d64ec07f1005eaa473af 2024-11-22T06:05:35,584 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithSkipTmp/11b79421a8f4d2dc2c325ae22e6ed4f0 2024-11-22T06:05:35,584 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithSkipTmp/70cc174be652d64ec07f1005eaa473af 2024-11-22T06:05:35,584 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithSkipTmp/11b79421a8f4d2dc2c325ae22e6ed4f0 2024-11-22T06:05:35,585 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(1048): stopping wal replay for 11b79421a8f4d2dc2c325ae22e6ed4f0 2024-11-22T06:05:35,585 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(1048): stopping wal replay for 70cc174be652d64ec07f1005eaa473af 2024-11-22T06:05:35,585 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(1060): Cleaning up temporary data for 11b79421a8f4d2dc2c325ae22e6ed4f0 2024-11-22T06:05:35,585 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(1060): Cleaning up temporary data for 70cc174be652d64ec07f1005eaa473af 2024-11-22T06:05:35,586 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(1093): writing seq id for 70cc174be652d64ec07f1005eaa473af 2024-11-22T06:05:35,586 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(1093): writing seq id for 11b79421a8f4d2dc2c325ae22e6ed4f0 2024-11-22T06:05:35,588 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithSkipTmp/11b79421a8f4d2dc2c325ae22e6ed4f0/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T06:05:35,588 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithSkipTmp/70cc174be652d64ec07f1005eaa473af/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T06:05:35,588 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(1114): Opened 11b79421a8f4d2dc2c325ae22e6ed4f0; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63734531, 
jitterRate=-0.05028147995471954}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-22T06:05:35,588 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 11b79421a8f4d2dc2c325ae22e6ed4f0 2024-11-22T06:05:35,588 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(1114): Opened 70cc174be652d64ec07f1005eaa473af; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64968849, jitterRate=-0.03188870847225189}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-22T06:05:35,588 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 70cc174be652d64ec07f1005eaa473af 2024-11-22T06:05:35,589 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(1006): Region open journal for 11b79421a8f4d2dc2c325ae22e6ed4f0: Running coprocessor pre-open hook at 1732255535580Writing region info on filesystem at 1732255535580Initializing all the Stores at 1732255535580Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732255535581 (+1 ms)Cleaning up temporary data from old regions at 1732255535585 (+4 ms)Running coprocessor post-open hooks at 1732255535588 (+3 ms)Region opened successfully at 1732255535589 (+1 ms) 2024-11-22T06:05:35,589 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(1006): Region open journal for 70cc174be652d64ec07f1005eaa473af: Running coprocessor pre-open hook at 1732255535580Writing region info on filesystem at 1732255535580Initializing all the Stores at 1732255535580Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732255535581 (+1 ms)Cleaning up temporary data from old regions at 1732255535585 (+4 ms)Running coprocessor post-open hooks at 1732255535588 (+3 ms)Region opened successfully at 1732255535589 (+1 ms) 2024-11-22T06:05:35,589 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSkipTmp,1,1732255535207.11b79421a8f4d2dc2c325ae22e6ed4f0., pid=196, masterSystemTime=1732255535576 2024-11-22T06:05:35,589 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSkipTmp,,1732255535207.70cc174be652d64ec07f1005eaa473af., pid=197, masterSystemTime=1732255535577 2024-11-22T06:05:35,591 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegionServer(2266): Finished post open deploy task for 
testtb-testExportFileSystemStateWithSkipTmp,,1732255535207.70cc174be652d64ec07f1005eaa473af. 2024-11-22T06:05:35,591 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSkipTmp,,1732255535207.70cc174be652d64ec07f1005eaa473af. 2024-11-22T06:05:35,592 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=194 updating hbase:meta row=70cc174be652d64ec07f1005eaa473af, regionState=OPEN, openSeqNum=2, regionLocation=407411ea95db,44619,1732255321573 2024-11-22T06:05:35,592 DEBUG [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSkipTmp,1,1732255535207.11b79421a8f4d2dc2c325ae22e6ed4f0. 2024-11-22T06:05:35,592 INFO [RS_OPEN_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSkipTmp,1,1732255535207.11b79421a8f4d2dc2c325ae22e6ed4f0. 2024-11-22T06:05:35,593 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=195 updating hbase:meta row=11b79421a8f4d2dc2c325ae22e6ed4f0, regionState=OPEN, openSeqNum=2, regionLocation=407411ea95db,35283,1732255321784 2024-11-22T06:05:35,595 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=197, ppid=194, state=RUNNABLE, hasLock=false; OpenRegionProcedure 70cc174be652d64ec07f1005eaa473af, server=407411ea95db,44619,1732255321573 because future has completed 2024-11-22T06:05:35,597 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=196, ppid=195, state=RUNNABLE, hasLock=false; OpenRegionProcedure 11b79421a8f4d2dc2c325ae22e6ed4f0, server=407411ea95db,35283,1732255321784 because future has completed 2024-11-22T06:05:35,598 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=197, resume processing ppid=194 2024-11-22T06:05:35,599 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=197, ppid=194, state=SUCCESS, hasLock=false; OpenRegionProcedure 70cc174be652d64ec07f1005eaa473af, server=407411ea95db,44619,1732255321573 in 173 msec 2024-11-22T06:05:35,600 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=194, ppid=193, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=70cc174be652d64ec07f1005eaa473af, ASSIGN in 335 msec 2024-11-22T06:05:35,600 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=196, resume processing ppid=195 2024-11-22T06:05:35,600 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=196, ppid=195, state=SUCCESS, hasLock=false; OpenRegionProcedure 11b79421a8f4d2dc2c325ae22e6ed4f0, server=407411ea95db,35283,1732255321784 in 176 msec 2024-11-22T06:05:35,602 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=195, resume processing ppid=193 2024-11-22T06:05:35,602 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=195, ppid=193, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=11b79421a8f4d2dc2c325ae22e6ed4f0, ASSIGN in 336 msec 2024-11-22T06:05:35,602 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=193, 
state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-22T06:05:35,603 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732255535602"}]},"ts":"1732255535602"} 2024-11-22T06:05:35,605 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=ENABLED in hbase:meta 2024-11-22T06:05:35,606 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=193, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_POST_OPERATION 2024-11-22T06:05:35,606 DEBUG [PEWorker-3 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithSkipTmp jenkins: RWXCA 2024-11-22T06:05:35,609 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41799 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-11-22T06:05:35,655 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44619-0x101626c19a90001, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:05:35,655 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:05:35,655 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35283-0x101626c19a90003, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:05:35,655 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41799-0x101626c19a90002, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:05:35,664 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-22T06:05:35,664 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-22T06:05:35,664 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-11-22T06:05:35,664 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-11-22T06:05:35,665 DEBUG [zk-permission-watcher-pool-0 {}] 
access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-22T06:05:35,665 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-11-22T06:05:35,667 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=193, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 455 msec 2024-11-22T06:05:35,667 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-22T06:05:35,667 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-11-22T06:05:35,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=193 2024-11-22T06:05:35,840 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-11-22T06:05:35,840 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportFileSystemStateWithSkipTmp get assigned. Timeout = 60000ms 2024-11-22T06:05:35,840 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-22T06:05:35,845 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportFileSystemStateWithSkipTmp assigned to meta. Checking AM states. 2024-11-22T06:05:35,845 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-22T06:05:35,845 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportFileSystemStateWithSkipTmp assigned. 2024-11-22T06:05:35,845 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-22T06:05:35,848 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-11-22T06:05:35,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732255535848 (current time:1732255535848). 
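The snapshot request above (ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp, type=FLUSH, ttl=0) reaches the master through MasterRpcServices.snapshot; from the client it is a single Admin.snapshot call. A hedged sketch of the equivalent request (class name mine):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeFlushSnapshot {
    public static void main(String[] args) throws Exception {
        TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            // FLUSH-type snapshot of an enabled table, matching the logged request; the
            // master validates the description (creation time, TTL, version, owner) first.
            admin.snapshot(new SnapshotDescription(
                "emptySnaptb0-testExportFileSystemStateWithSkipTmp", tn, SnapshotType.FLUSH));
        }
    }
}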
2024-11-22T06:05:35,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-22T06:05:35,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp VERSION not specified, setting to 2 2024-11-22T06:05:35,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-22T06:05:35,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7d157207, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:05:35,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] client.ClusterIdFetcher(90): Going to request 407411ea95db,40429,-1 for getting cluster id 2024-11-22T06:05:35,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T06:05:35,850 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '6eeb2921-6f20-4de7-bcc0-81ebecd4007b' 2024-11-22T06:05:35,850 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T06:05:35,850 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "6eeb2921-6f20-4de7-bcc0-81ebecd4007b" 2024-11-22T06:05:35,850 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@461eaeb7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:05:35,850 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [407411ea95db,40429,-1] 2024-11-22T06:05:35,851 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T06:05:35,851 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:05:35,851 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59816, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T06:05:35,852 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4da2cf9b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:05:35,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T06:05:35,853 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=407411ea95db,41799,1732255321718, seqNum=-1] 2024-11-22T06:05:35,853 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T06:05:35,854 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33636, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T06:05:35,856 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429. 2024-11-22T06:05:35,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-22T06:05:35,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:05:35,856 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
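[Annotation] The "jenkins: RWXCA" ACL entries written and read above correspond to READ, WRITE, EXEC, CREATE and ADMIN on the table; in this run they are written automatically by the master during CREATE_TABLE_POST_OPERATION, as the PEWorker-3 entries show. For comparison, a hedged sketch of granting the same table-level permission set explicitly via the AccessControl coprocessor client; the user name and connection setup are illustrative.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class GrantSketch {
  public static void main(String[] args) throws Throwable {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
      // Grant on the whole table (family and qualifier left null), i.e. the
      // same "RWXCA" action set shown in the ACL entries above.
      AccessControlClient.grant(conn,
          TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"),
          "jenkins", null, null,
          Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
          Permission.Action.CREATE, Permission.Action.ADMIN);
    }
  }
}
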
2024-11-22T06:05:35,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:05:35,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@48558442, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:05:35,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] client.ClusterIdFetcher(90): Going to request 407411ea95db,40429,-1 for getting cluster id 2024-11-22T06:05:35,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T06:05:35,869 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '6eeb2921-6f20-4de7-bcc0-81ebecd4007b' 2024-11-22T06:05:35,869 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T06:05:35,869 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "6eeb2921-6f20-4de7-bcc0-81ebecd4007b" 2024-11-22T06:05:35,869 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4d667100, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:05:35,869 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [407411ea95db,40429,-1] 2024-11-22T06:05:35,869 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T06:05:35,870 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:05:35,870 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59826, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T06:05:35,871 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2acad312, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:05:35,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T06:05:35,873 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=407411ea95db,41799,1732255321718, seqNum=-1] 2024-11-22T06:05:35,873 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T06:05:35,874 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] 
ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33650, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T06:05:35,876 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSkipTmp', locateType=CURRENT is [region=hbase:acl,,1732255324607.c2a1af531af5dfa747044be7409c1d27., hostname=407411ea95db,41799,1732255321718, seqNum=2] 2024-11-22T06:05:35,878 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429. 2024-11-22T06:05:35,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor220.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-22T06:05:35,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:05:35,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:05:35,879 
INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-22T06:05:35,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-11-22T06:05:35,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-22T06:05:35,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] procedure2.ProcedureExecutor(1139): Stored pid=198, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-11-22T06:05:35,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 198 2024-11-22T06:05:35,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=198 2024-11-22T06:05:35,882 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=198, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-22T06:05:35,883 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=198, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-22T06:05:35,887 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=198, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-22T06:05:35,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742300_1476 (size=203) 2024-11-22T06:05:35,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742300_1476 (size=203) 2024-11-22T06:05:35,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742300_1476 (size=203) 2024-11-22T06:05:35,898 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=198, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-22T06:05:35,898 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=199, ppid=198, 
state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 70cc174be652d64ec07f1005eaa473af}, {pid=200, ppid=198, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 11b79421a8f4d2dc2c325ae22e6ed4f0}] 2024-11-22T06:05:35,899 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=199, ppid=198, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 70cc174be652d64ec07f1005eaa473af 2024-11-22T06:05:35,899 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=200, ppid=198, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 11b79421a8f4d2dc2c325ae22e6ed4f0 2024-11-22T06:05:35,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=198 2024-11-22T06:05:36,050 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44619 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=199 2024-11-22T06:05:36,050 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35283 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=200 2024-11-22T06:05:36,050 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=199}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1732255535207.70cc174be652d64ec07f1005eaa473af. 2024-11-22T06:05:36,050 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=200}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1732255535207.11b79421a8f4d2dc2c325ae22e6ed4f0. 2024-11-22T06:05:36,050 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=200}] regionserver.HRegion(2603): Flush status journal for 11b79421a8f4d2dc2c325ae22e6ed4f0: 2024-11-22T06:05:36,050 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=199}] regionserver.HRegion(2603): Flush status journal for 70cc174be652d64ec07f1005eaa473af: 2024-11-22T06:05:36,050 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=200}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,1,1732255535207.11b79421a8f4d2dc2c325ae22e6ed4f0. for emptySnaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-11-22T06:05:36,050 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=199}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,,1732255535207.70cc174be652d64ec07f1005eaa473af. for emptySnaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-11-22T06:05:36,050 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=200}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,1,1732255535207.11b79421a8f4d2dc2c325ae22e6ed4f0.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-22T06:05:36,050 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=199}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,,1732255535207.70cc174be652d64ec07f1005eaa473af.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-22T06:05:36,050 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=199}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-22T06:05:36,050 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=200}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-22T06:05:36,050 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=199}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-22T06:05:36,050 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=200}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-22T06:05:36,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742301_1477 (size=82) 2024-11-22T06:05:36,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742301_1477 (size=82) 2024-11-22T06:05:36,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742301_1477 (size=82) 2024-11-22T06:05:36,071 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=200}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1732255535207.11b79421a8f4d2dc2c325ae22e6ed4f0. 
2024-11-22T06:05:36,072 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=200}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=200 2024-11-22T06:05:36,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.HMaster(4169): Remote procedure done, pid=200 2024-11-22T06:05:36,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742302_1478 (size=82) 2024-11-22T06:05:36,072 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp on region 11b79421a8f4d2dc2c325ae22e6ed4f0 2024-11-22T06:05:36,073 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=200, ppid=198, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 11b79421a8f4d2dc2c325ae22e6ed4f0 2024-11-22T06:05:36,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742302_1478 (size=82) 2024-11-22T06:05:36,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742302_1478 (size=82) 2024-11-22T06:05:36,074 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=199}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1732255535207.70cc174be652d64ec07f1005eaa473af. 2024-11-22T06:05:36,074 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=199}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=199 2024-11-22T06:05:36,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.HMaster(4169): Remote procedure done, pid=199 2024-11-22T06:05:36,074 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp on region 70cc174be652d64ec07f1005eaa473af 2024-11-22T06:05:36,074 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=199, ppid=198, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 70cc174be652d64ec07f1005eaa473af 2024-11-22T06:05:36,076 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=200, ppid=198, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 11b79421a8f4d2dc2c325ae22e6ed4f0 in 176 msec 2024-11-22T06:05:36,078 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=199, resume processing ppid=198 2024-11-22T06:05:36,078 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=198, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-22T06:05:36,078 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=199, ppid=198, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 70cc174be652d64ec07f1005eaa473af in 177 msec 2024-11-22T06:05:36,078 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=198, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; 
org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-22T06:05:36,079 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=198, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-22T06:05:36,079 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-22T06:05:36,080 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-22T06:05:36,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742303_1479 (size=585) 2024-11-22T06:05:36,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742303_1479 (size=585) 2024-11-22T06:05:36,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742303_1479 (size=585) 2024-11-22T06:05:36,096 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=198, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-22T06:05:36,101 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=198, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-22T06:05:36,102 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-22T06:05:36,104 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=198, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-22T06:05:36,104 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 198 2024-11-22T06:05:36,105 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=198, state=SUCCESS, 
hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } in 224 msec 2024-11-22T06:05:36,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=198 2024-11-22T06:05:36,199 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-11-22T06:05:36,203 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='07e356363ad78023820d6d0f890cbac56', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,,1732255535207.70cc174be652d64ec07f1005eaa473af., hostname=407411ea95db,44619,1732255321573, seqNum=2] 2024-11-22T06:05:36,204 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='194512ba783ac8e4d854e7c554a9db33e', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,1,1732255535207.11b79421a8f4d2dc2c325ae22e6ed4f0., hostname=407411ea95db,35283,1732255321784, seqNum=2] 2024-11-22T06:05:36,206 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='2699968d0b750b0ffb0626666302043da', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,1,1732255535207.11b79421a8f4d2dc2c325ae22e6ed4f0., hostname=407411ea95db,35283,1732255321784, seqNum=2] 2024-11-22T06:05:36,207 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='35f5ae646c24013723b459d762f671f7e', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,1,1732255535207.11b79421a8f4d2dc2c325ae22e6ed4f0., hostname=407411ea95db,35283,1732255321784, seqNum=2] 2024-11-22T06:05:36,209 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44619 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSkipTmp,,1732255535207.70cc174be652d64ec07f1005eaa473af. with WAL disabled. Data may be lost in the event of a crash. 2024-11-22T06:05:36,211 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35283 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSkipTmp,1,1732255535207.11b79421a8f4d2dc2c325ae22e6ed4f0. with WAL disabled. Data may be lost in the event of a crash. 2024-11-22T06:05:36,212 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-22T06:05:36,214 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithSkipTmp 2024-11-22T06:05:36,214 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithSkipTmp,,1732255535207.70cc174be652d64ec07f1005eaa473af. 
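[Annotation] The two HRegion(8528) warnings above record that the test writes its rows with the WAL disabled, so the data would be lost if the region server crashed before the flush seen later. A minimal sketch of a client write that takes that code path; the row key and value are made up, while "cf"/"q" match the column that appears in the subsequent flush entries.

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalPutSketch {
  public static void main(String[] args) throws IOException {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(
             TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"))) {
      Put put = new Put(Bytes.toBytes("example-row"));        // illustrative row key
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"),  // cf:q as in the flush log
          Bytes.toBytes("value"));
      // Skipping the WAL is what produces the "writing data ... with WAL disabled"
      // warning logged above; it trades durability for write speed.
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);
    }
  }
}
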
2024-11-22T06:05:36,214 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-22T06:05:36,216 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-22T06:05:36,221 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-22T06:05:36,227 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-22T06:05:36,230 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-11-22T06:05:36,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732255536230 (current time:1732255536230). 2024-11-22T06:05:36,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-22T06:05:36,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithSkipTmp VERSION not specified, setting to 2 2024-11-22T06:05:36,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-22T06:05:36,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c4f8c9d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:05:36,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] client.ClusterIdFetcher(90): Going to request 407411ea95db,40429,-1 for getting cluster id 2024-11-22T06:05:36,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T06:05:36,231 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '6eeb2921-6f20-4de7-bcc0-81ebecd4007b' 2024-11-22T06:05:36,231 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T06:05:36,231 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "6eeb2921-6f20-4de7-bcc0-81ebecd4007b" 2024-11-22T06:05:36,232 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@191d5bbc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:05:36,232 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [407411ea95db,40429,-1] 2024-11-22T06:05:36,232 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T06:05:36,232 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:05:36,233 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59842, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T06:05:36,233 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7c10a934, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:05:36,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T06:05:36,234 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=407411ea95db,41799,1732255321718, seqNum=-1] 2024-11-22T06:05:36,235 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T06:05:36,235 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33658, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T06:05:36,236 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429. 
2024-11-22T06:05:36,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-22T06:05:36,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:05:36,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:05:36,237 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-22T06:05:36,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@23f989fe, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:05:36,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] client.ClusterIdFetcher(90): Going to request 407411ea95db,40429,-1 for getting cluster id 2024-11-22T06:05:36,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T06:05:36,238 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '6eeb2921-6f20-4de7-bcc0-81ebecd4007b' 2024-11-22T06:05:36,239 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T06:05:36,239 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "6eeb2921-6f20-4de7-bcc0-81ebecd4007b" 2024-11-22T06:05:36,239 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6eda6351, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:05:36,239 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [407411ea95db,40429,-1] 2024-11-22T06:05:36,239 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T06:05:36,239 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:05:36,240 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59850, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T06:05:36,240 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@14cf6cae, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T06:05:36,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T06:05:36,242 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=407411ea95db,41799,1732255321718, seqNum=-1] 2024-11-22T06:05:36,242 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T06:05:36,243 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33666, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T06:05:36,245 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSkipTmp', locateType=CURRENT is [region=hbase:acl,,1732255324607.c2a1af531af5dfa747044be7409c1d27., hostname=407411ea95db,41799,1732255321718, seqNum=2] 2024-11-22T06:05:36,247 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429. 
2024-11-22T06:05:36,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor220.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-22T06:05:36,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:05:36,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:05:36,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-11-22T06:05:36,247 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-22T06:05:36,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
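[Annotation] SnapshotManager(810) above first verifies that no snapshot with this name exists before the SnapshotProcedure is stored (pid=201 below). A client can make the same duplicate-name check, or later confirm that the finished snapshot is present, by listing snapshots; a small illustrative sketch follows.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public class SnapshotExistsSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      String name = "snaptb0-testExportFileSystemStateWithSkipTmp";
      // listSnapshots() returns the completed snapshots known to the master.
      boolean exists = admin.listSnapshots().stream()
          .map(SnapshotDescription::getName)
          .anyMatch(name::equals);
      System.out.println("snapshot " + name + (exists ? " exists" : " not found"));
    }
  }
}
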
2024-11-22T06:05:36,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] procedure2.ProcedureExecutor(1139): Stored pid=201, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-11-22T06:05:36,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 201 2024-11-22T06:05:36,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=201 2024-11-22T06:05:36,250 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-22T06:05:36,251 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-22T06:05:36,254 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-22T06:05:36,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742304_1480 (size=198) 2024-11-22T06:05:36,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742304_1480 (size=198) 2024-11-22T06:05:36,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742304_1480 (size=198) 2024-11-22T06:05:36,278 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-22T06:05:36,278 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=202, ppid=201, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 70cc174be652d64ec07f1005eaa473af}, {pid=203, ppid=201, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 11b79421a8f4d2dc2c325ae22e6ed4f0}] 2024-11-22T06:05:36,279 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=202, ppid=201, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 70cc174be652d64ec07f1005eaa473af 2024-11-22T06:05:36,279 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=203, ppid=201, 
state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 11b79421a8f4d2dc2c325ae22e6ed4f0 2024-11-22T06:05:36,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=201 2024-11-22T06:05:36,431 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35283 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=203 2024-11-22T06:05:36,432 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44619 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=202 2024-11-22T06:05:36,432 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1732255535207.11b79421a8f4d2dc2c325ae22e6ed4f0. 2024-11-22T06:05:36,432 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1732255535207.70cc174be652d64ec07f1005eaa473af. 2024-11-22T06:05:36,432 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.HRegion(2902): Flushing 70cc174be652d64ec07f1005eaa473af 1/1 column families, dataSize=199 B heapSize=688 B 2024-11-22T06:05:36,432 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.HRegion(2902): Flushing 11b79421a8f4d2dc2c325ae22e6ed4f0 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-11-22T06:05:36,452 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithSkipTmp/70cc174be652d64ec07f1005eaa473af/.tmp/cf/9aebab52c67548fd92e0d3c326af392f is 71, key is 071ac81ae7d5ede41795c68597b3f485/cf:q/1732255536209/Put/seqid=0 2024-11-22T06:05:36,455 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithSkipTmp/11b79421a8f4d2dc2c325ae22e6ed4f0/.tmp/cf/b2d596265ec24efaa32cb338d662bf48 is 71, key is 1178a1a9ac1c4dce336d317dfe777171/cf:q/1732255536211/Put/seqid=0 2024-11-22T06:05:36,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742305_1481 (size=5286) 2024-11-22T06:05:36,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742305_1481 (size=5286) 2024-11-22T06:05:36,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742305_1481 (size=5286) 2024-11-22T06:05:36,461 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=199 B at sequenceid=6 (bloomFilter=true), 
to=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithSkipTmp/70cc174be652d64ec07f1005eaa473af/.tmp/cf/9aebab52c67548fd92e0d3c326af392f 2024-11-22T06:05:36,466 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithSkipTmp/70cc174be652d64ec07f1005eaa473af/.tmp/cf/9aebab52c67548fd92e0d3c326af392f as hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithSkipTmp/70cc174be652d64ec07f1005eaa473af/cf/9aebab52c67548fd92e0d3c326af392f 2024-11-22T06:05:36,476 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithSkipTmp/70cc174be652d64ec07f1005eaa473af/cf/9aebab52c67548fd92e0d3c326af392f, entries=3, sequenceid=6, filesize=5.2 K 2024-11-22T06:05:36,477 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.HRegion(3140): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for 70cc174be652d64ec07f1005eaa473af in 45ms, sequenceid=6, compaction requested=false 2024-11-22T06:05:36,477 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithSkipTmp' 2024-11-22T06:05:36,478 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.HRegion(2603): Flush status journal for 70cc174be652d64ec07f1005eaa473af: 2024-11-22T06:05:36,478 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,,1732255535207.70cc174be652d64ec07f1005eaa473af. for snaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-11-22T06:05:36,478 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,,1732255535207.70cc174be652d64ec07f1005eaa473af.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-22T06:05:36,478 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-22T06:05:36,478 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithSkipTmp/70cc174be652d64ec07f1005eaa473af/cf/9aebab52c67548fd92e0d3c326af392f] hfiles 2024-11-22T06:05:36,478 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithSkipTmp/70cc174be652d64ec07f1005eaa473af/cf/9aebab52c67548fd92e0d3c326af392f for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-22T06:05:36,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742306_1482 (size=8324) 2024-11-22T06:05:36,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742306_1482 (size=8324) 2024-11-22T06:05:36,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742306_1482 (size=8324) 2024-11-22T06:05:36,490 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.06 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithSkipTmp/11b79421a8f4d2dc2c325ae22e6ed4f0/.tmp/cf/b2d596265ec24efaa32cb338d662bf48 2024-11-22T06:05:36,497 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithSkipTmp/11b79421a8f4d2dc2c325ae22e6ed4f0/.tmp/cf/b2d596265ec24efaa32cb338d662bf48 as hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithSkipTmp/11b79421a8f4d2dc2c325ae22e6ed4f0/cf/b2d596265ec24efaa32cb338d662bf48 2024-11-22T06:05:36,502 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithSkipTmp/11b79421a8f4d2dc2c325ae22e6ed4f0/cf/b2d596265ec24efaa32cb338d662bf48, entries=47, sequenceid=6, filesize=8.1 K 2024-11-22T06:05:36,503 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.HRegion(3140): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for 11b79421a8f4d2dc2c325ae22e6ed4f0 in 71ms, sequenceid=6, compaction requested=false 2024-11-22T06:05:36,503 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 
{event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.HRegion(2603): Flush status journal for 11b79421a8f4d2dc2c325ae22e6ed4f0: 2024-11-22T06:05:36,503 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,1,1732255535207.11b79421a8f4d2dc2c325ae22e6ed4f0. for snaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-11-22T06:05:36,503 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,1,1732255535207.11b79421a8f4d2dc2c325ae22e6ed4f0.' region-info for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-22T06:05:36,503 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-22T06:05:36,503 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithSkipTmp/11b79421a8f4d2dc2c325ae22e6ed4f0/cf/b2d596265ec24efaa32cb338d662bf48] hfiles 2024-11-22T06:05:36,503 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithSkipTmp/11b79421a8f4d2dc2c325ae22e6ed4f0/cf/b2d596265ec24efaa32cb338d662bf48 for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-22T06:05:36,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742307_1483 (size=121) 2024-11-22T06:05:36,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742307_1483 (size=121) 2024-11-22T06:05:36,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742307_1483 (size=121) 2024-11-22T06:05:36,512 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1732255535207.70cc174be652d64ec07f1005eaa473af. 
2024-11-22T06:05:36,512 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=202 2024-11-22T06:05:36,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.HMaster(4169): Remote procedure done, pid=202 2024-11-22T06:05:36,513 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSkipTmp on region 70cc174be652d64ec07f1005eaa473af 2024-11-22T06:05:36,513 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=202, ppid=201, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 70cc174be652d64ec07f1005eaa473af 2024-11-22T06:05:36,515 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=202, ppid=201, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 70cc174be652d64ec07f1005eaa473af in 236 msec 2024-11-22T06:05:36,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742308_1484 (size=121) 2024-11-22T06:05:36,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742308_1484 (size=121) 2024-11-22T06:05:36,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742308_1484 (size=121) 2024-11-22T06:05:36,530 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1732255535207.11b79421a8f4d2dc2c325ae22e6ed4f0. 
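The entries above trace the FLUSH-type snapshot snaptb0-testExportFileSystemStateWithSkipTmp being taken: each region's memstore is flushed to an HFile, SnapshotRegionCallable records the region info and HFile references into the region manifest, and the per-region SnapshotRegionProcedures (pid=202/203) report completion back to the master. For orientation, a minimal sketch of how such a snapshot is requested through the HBase client Admin API; the configuration and class name are illustrative placeholders, not taken from this test run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TakeFlushSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();  // reads hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // For an enabled table this takes a FLUSH-type snapshot: flush each region,
      // then reference the resulting HFiles, which matches the flush entries above.
      admin.snapshot("snaptb0-testExportFileSystemStateWithSkipTmp",
          TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"));
    }
  }
}

For an enabled table the default snapshot type is FLUSH, which is why the log shows a memstore flush on each region before the HFile references are collected.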
2024-11-22T06:05:36,530 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/407411ea95db:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=203 2024-11-22T06:05:36,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.HMaster(4169): Remote procedure done, pid=203 2024-11-22T06:05:36,530 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSkipTmp on region 11b79421a8f4d2dc2c325ae22e6ed4f0 2024-11-22T06:05:36,530 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=203, ppid=201, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 11b79421a8f4d2dc2c325ae22e6ed4f0 2024-11-22T06:05:36,534 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=203, resume processing ppid=201 2024-11-22T06:05:36,534 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-22T06:05:36,534 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=203, ppid=201, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 11b79421a8f4d2dc2c325ae22e6ed4f0 in 253 msec 2024-11-22T06:05:36,536 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-22T06:05:36,537 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-22T06:05:36,537 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-22T06:05:36,537 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-22T06:05:36,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742309_1485 (size=663) 2024-11-22T06:05:36,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742309_1485 (size=663) 2024-11-22T06:05:36,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742309_1485 (size=663) 2024-11-22T06:05:36,560 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp 
table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-22T06:05:36,566 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-22T06:05:36,567 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-22T06:05:36,568 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-22T06:05:36,568 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 201 2024-11-22T06:05:36,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=201 2024-11-22T06:05:36,570 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=201, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } in 320 msec 2024-11-22T06:05:36,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=201 2024-11-22T06:05:36,879 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-11-22T06:05:36,880 INFO [Time-limited test {}] snapshot.TestExportSnapshot(467): HDFS export destination path: hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/export-test/export-1732255536879 2024-11-22T06:05:36,880 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=hdfs://localhost:33847, tgtDir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/export-test/export-1732255536879, rawTgtDir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/export-test/export-1732255536879, srcFsUri=hdfs://localhost:33847, srcDir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788 2024-11-22T06:05:36,919 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:33847, inputRoot=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788 2024-11-22T06:05:36,919 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-2145612548_22, ugi=jenkins 
(auth:SIMPLE)]], outputRoot=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/export-test/export-1732255536879, skipTmp=true, initialOutputSnapshotDir=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/export-test/export-1732255536879/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-22T06:05:36,921 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 2024-11-22T06:05:36,925 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/export-test/export-1732255536879/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-22T06:05:36,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742311_1487 (size=663) 2024-11-22T06:05:36,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742311_1487 (size=663) 2024-11-22T06:05:36,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742310_1486 (size=198) 2024-11-22T06:05:36,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742310_1486 (size=198) 2024-11-22T06:05:36,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742310_1486 (size=198) 2024-11-22T06:05:36,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742311_1487 (size=663) 2024-11-22T06:05:36,940 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:05:36,940 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:05:36,941 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:05:37,944 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/hadoop-3390284788175273787.jar 2024-11-22T06:05:37,945 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:05:37,945 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:05:38,025 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/hadoop-14988818052057697598.jar 2024-11-22T06:05:38,025 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:05:38,025 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:05:38,026 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:05:38,026 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:05:38,026 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:05:38,026 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-22T06:05:38,027 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-22T06:05:38,027 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-22T06:05:38,027 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-22T06:05:38,027 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 
2024-11-22T06:05:38,028 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-22T06:05:38,028 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-22T06:05:38,028 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-22T06:05:38,028 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-22T06:05:38,029 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-22T06:05:38,029 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-22T06:05:38,029 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-22T06:05:38,029 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-22T06:05:38,030 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-22T06:05:38,030 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-22T06:05:38,030 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-22T06:05:38,030 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 
2024-11-22T06:05:38,031 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-22T06:05:38,031 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-22T06:05:38,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742312_1488 (size=131440) 2024-11-22T06:05:38,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742312_1488 (size=131440) 2024-11-22T06:05:38,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742312_1488 (size=131440) 2024-11-22T06:05:38,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742313_1489 (size=4188619) 2024-11-22T06:05:38,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742313_1489 (size=4188619) 2024-11-22T06:05:38,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742313_1489 (size=4188619) 2024-11-22T06:05:38,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742314_1490 (size=1323991) 2024-11-22T06:05:38,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742314_1490 (size=1323991) 2024-11-22T06:05:38,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742314_1490 (size=1323991) 2024-11-22T06:05:38,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742315_1491 (size=903736) 2024-11-22T06:05:38,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742315_1491 (size=903736) 2024-11-22T06:05:38,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742315_1491 (size=903736) 2024-11-22T06:05:38,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742316_1492 (size=8360083) 2024-11-22T06:05:38,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742316_1492 (size=8360083) 2024-11-22T06:05:38,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742316_1492 (size=8360083) 2024-11-22T06:05:38,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742317_1493 (size=6424744) 
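The long run of "For class ..., using jar ..." DEBUG entries above comes from TableMapReduceUtil resolving each class the export job needs (HBase modules, shaded thirdparty libraries, ZooKeeper, OpenTelemetry, Hadoop) to the jar that contains it, so those jars can be shipped with the MapReduce job; the subsequent addStoredBlock entries are those jars being written into HDFS. A minimal sketch of the kind of call that produces this output, with an illustrative job name that is not taken from this log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class DependencyJarsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "export-snapshot-sketch");  // illustrative job name
    // Resolves each required class to its containing jar and adds those jars to the
    // job's distributed cache; each resolution is logged at DEBUG as
    // "For class <name>, using jar <path>".
    TableMapReduceUtil.addDependencyJars(job);
  }
}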
2024-11-22T06:05:38,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742317_1493 (size=6424744) 2024-11-22T06:05:38,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742317_1493 (size=6424744) 2024-11-22T06:05:38,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742318_1494 (size=1877034) 2024-11-22T06:05:38,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742318_1494 (size=1877034) 2024-11-22T06:05:38,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742318_1494 (size=1877034) 2024-11-22T06:05:38,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742319_1495 (size=77835) 2024-11-22T06:05:38,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742319_1495 (size=77835) 2024-11-22T06:05:38,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742319_1495 (size=77835) 2024-11-22T06:05:38,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742320_1496 (size=30949) 2024-11-22T06:05:38,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742320_1496 (size=30949) 2024-11-22T06:05:38,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742320_1496 (size=30949) 2024-11-22T06:05:38,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742321_1497 (size=1597347) 2024-11-22T06:05:38,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742321_1497 (size=1597347) 2024-11-22T06:05:38,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742321_1497 (size=1597347) 2024-11-22T06:05:38,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742322_1498 (size=4695811) 2024-11-22T06:05:38,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742322_1498 (size=4695811) 2024-11-22T06:05:38,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742322_1498 (size=4695811) 2024-11-22T06:05:38,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742323_1499 (size=232957) 2024-11-22T06:05:38,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742323_1499 (size=232957) 2024-11-22T06:05:38,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742323_1499 
(size=232957) 2024-11-22T06:05:38,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742324_1500 (size=127628) 2024-11-22T06:05:38,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742324_1500 (size=127628) 2024-11-22T06:05:38,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742324_1500 (size=127628) 2024-11-22T06:05:38,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742325_1501 (size=20406) 2024-11-22T06:05:38,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742325_1501 (size=20406) 2024-11-22T06:05:38,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742325_1501 (size=20406) 2024-11-22T06:05:38,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742326_1502 (size=5175431) 2024-11-22T06:05:38,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742326_1502 (size=5175431) 2024-11-22T06:05:38,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742326_1502 (size=5175431) 2024-11-22T06:05:38,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742327_1503 (size=217634) 2024-11-22T06:05:38,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742327_1503 (size=217634) 2024-11-22T06:05:38,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742327_1503 (size=217634) 2024-11-22T06:05:38,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742328_1504 (size=440656) 2024-11-22T06:05:38,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742328_1504 (size=440656) 2024-11-22T06:05:38,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742328_1504 (size=440656) 2024-11-22T06:05:38,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742329_1505 (size=1832290) 2024-11-22T06:05:38,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742329_1505 (size=1832290) 2024-11-22T06:05:38,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742329_1505 (size=1832290) 2024-11-22T06:05:38,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742330_1506 (size=322274) 2024-11-22T06:05:38,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to 
blk_1073742330_1506 (size=322274) 2024-11-22T06:05:38,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742330_1506 (size=322274) 2024-11-22T06:05:38,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742331_1507 (size=503880) 2024-11-22T06:05:38,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742331_1507 (size=503880) 2024-11-22T06:05:38,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742331_1507 (size=503880) 2024-11-22T06:05:38,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742332_1508 (size=29229) 2024-11-22T06:05:38,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742332_1508 (size=29229) 2024-11-22T06:05:38,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742332_1508 (size=29229) 2024-11-22T06:05:38,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742333_1509 (size=24096) 2024-11-22T06:05:38,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742333_1509 (size=24096) 2024-11-22T06:05:38,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742333_1509 (size=24096) 2024-11-22T06:05:38,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742334_1510 (size=111872) 2024-11-22T06:05:38,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742334_1510 (size=111872) 2024-11-22T06:05:38,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742334_1510 (size=111872) 2024-11-22T06:05:38,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742335_1511 (size=45609) 2024-11-22T06:05:38,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742335_1511 (size=45609) 2024-11-22T06:05:38,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742335_1511 (size=45609) 2024-11-22T06:05:38,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742336_1512 (size=136454) 2024-11-22T06:05:38,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742336_1512 (size=136454) 2024-11-22T06:05:38,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742336_1512 (size=136454) 2024-11-22T06:05:38,322 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. 
See Job or Job#setJar(String). 2024-11-22T06:05:38,324 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'snaptb0-testExportFileSystemStateWithSkipTmp' hfile list 2024-11-22T06:05:38,325 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(754): export split=0 size=13.3 K 2024-11-22T06:05:38,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742337_1513 (size=366) 2024-11-22T06:05:38,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742337_1513 (size=366) 2024-11-22T06:05:38,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742337_1513 (size=366) 2024-11-22T06:05:38,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742338_1514 (size=15) 2024-11-22T06:05:38,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742338_1514 (size=15) 2024-11-22T06:05:38,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742338_1514 (size=15) 2024-11-22T06:05:38,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742339_1515 (size=303904) 2024-11-22T06:05:38,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742339_1515 (size=303904) 2024-11-22T06:05:38,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742339_1515 (size=303904) 2024-11-22T06:05:39,494 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-22T06:05:39,494 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-22T06:05:39,497 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732255328437_0009_000001 (auth:SIMPLE) from 127.0.0.1:57738 2024-11-22T06:05:39,506 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1754703613/yarn-5946224054/MiniMRCluster_1754703613-localDir-nm-1_1/usercache/jenkins/appcache/application_1732255328437_0009/container_1732255328437_0009_01_000001/launch_container.sh] 2024-11-22T06:05:39,506 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1754703613/yarn-5946224054/MiniMRCluster_1754703613-localDir-nm-1_1/usercache/jenkins/appcache/application_1732255328437_0009/container_1732255328437_0009_01_000001/container_tokens] 2024-11-22T06:05:39,507 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1754703613/yarn-5946224054/MiniMRCluster_1754703613-localDir-nm-1_1/usercache/jenkins/appcache/application_1732255328437_0009/container_1732255328437_0009_01_000001/sysfs] 2024-11-22T06:05:40,131 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732255328437_0010_000001 (auth:SIMPLE) from 127.0.0.1:47078 2024-11-22T06:05:40,362 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-22T06:05:41,126 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp 2024-11-22T06:05:41,126 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp Metrics about Tables on a single HBase RegionServer 2024-11-22T06:05:41,127 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum 2024-11-22T06:05:45,711 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732255328437_0010_000001 (auth:SIMPLE) from 127.0.0.1:47288 2024-11-22T06:05:46,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742340_1516 (size=349578) 2024-11-22T06:05:46,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742340_1516 (size=349578) 2024-11-22T06:05:46,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742340_1516 (size=349578) 2024-11-22T06:05:46,629 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-22T06:05:47,944 INFO [Socket Reader #1 for port 0 {}] 
ipc.Server$Connection(2290): Auth successful for appattempt_1732255328437_0010_000001 (auth:SIMPLE) from 127.0.0.1:50486 2024-11-22T06:05:48,230 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region c1407c3a5417180f7dcb0e325387e8f9, had cached 0 bytes from a total of 8188 2024-11-22T06:05:48,230 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region bcf300d151c9df6bfc8b093b51457a6c, had cached 0 bytes from a total of 5422 2024-11-22T06:05:50,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742341_1517 (size=8324) 2024-11-22T06:05:50,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742341_1517 (size=8324) 2024-11-22T06:05:50,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742341_1517 (size=8324) 2024-11-22T06:05:50,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742342_1518 (size=5286) 2024-11-22T06:05:50,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742342_1518 (size=5286) 2024-11-22T06:05:50,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742342_1518 (size=5286) 2024-11-22T06:05:50,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742343_1519 (size=17455) 2024-11-22T06:05:50,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742343_1519 (size=17455) 2024-11-22T06:05:50,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742343_1519 (size=17455) 2024-11-22T06:05:50,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742344_1520 (size=476) 2024-11-22T06:05:50,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742344_1520 (size=476) 2024-11-22T06:05:50,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742344_1520 (size=476) 2024-11-22T06:05:50,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742345_1521 (size=17455) 2024-11-22T06:05:50,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742345_1521 (size=17455) 2024-11-22T06:05:50,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742345_1521 (size=17455) 2024-11-22T06:05:50,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742346_1522 (size=349578) 2024-11-22T06:05:50,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742346_1522 (size=349578) 
2024-11-22T06:05:50,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742346_1522 (size=349578) 2024-11-22T06:05:50,749 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732255328437_0010_000001 (auth:SIMPLE) from 127.0.0.1:50500 2024-11-22T06:05:50,760 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(360): Exit code from container container_1732255328437_0010_01_000002 is : 143 2024-11-22T06:05:50,774 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1754703613/yarn-5946224054/MiniMRCluster_1754703613-localDir-nm-0_2/usercache/jenkins/appcache/application_1732255328437_0010/container_1732255328437_0010_01_000002/launch_container.sh] 2024-11-22T06:05:50,775 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1754703613/yarn-5946224054/MiniMRCluster_1754703613-localDir-nm-0_2/usercache/jenkins/appcache/application_1732255328437_0010/container_1732255328437_0010_01_000002/container_tokens] 2024-11-22T06:05:50,775 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1754703613/yarn-5946224054/MiniMRCluster_1754703613-localDir-nm-0_2/usercache/jenkins/appcache/application_1732255328437_0010/container_1732255328437_0010_01_000002/sysfs] 2024-11-22T06:05:52,476 INFO [Time-limited test {}] snapshot.ExportSnapshot(1210): Finalize the Snapshot Export 2024-11-22T06:05:52,476 INFO [Time-limited test {}] snapshot.ExportSnapshot(1221): Verify the exported snapshot's expiration status and integrity. 
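At this point the ExportSnapshot run has copied the snapshot manifest and HFiles into the export directory, and because skipTmp=true it wrote directly into the target .hbase-snapshot directory instead of staging under a .tmp folder; the tool then finalizes and re-verifies the exported snapshot. A rough sketch of driving the same tool programmatically; the destination path is a placeholder and the exact option names are an assumption that can differ between HBase versions (check the tool's help output), so this is not a transcript of what the test executed:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Snapshot name comes from this log; the destination URI is a placeholder and the
    // option names are an assumption that may vary by HBase version.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "--snapshot", "snaptb0-testExportFileSystemStateWithSkipTmp",
        "--copy-to", "hdfs://namenode:8020/hbase-backup",
        "--skip-tmp"
    });
    System.exit(rc);
  }
}

The same class is usually invoked from the command line as hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot with equivalent options.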
2024-11-22T06:05:52,482 INFO [Time-limited test {}] snapshot.ExportSnapshot(1227): Export Completed: snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-22T06:05:52,482 INFO [Time-limited test {}] snapshot.TestExportSnapshot(361): Exported snapshot 2024-11-22T06:05:52,483 INFO [Time-limited test {}] snapshot.TestExportSnapshot(372): Verified filesystem state 2024-11-22T06:05:52,483 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-2145612548_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp at hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-22T06:05:52,483 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/.snapshotinfo 2024-11-22T06:05:52,483 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/data.manifest 2024-11-22T06:05:52,483 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-2145612548_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/export-test/export-1732255536879/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp at hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/export-test/export-1732255536879/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-22T06:05:52,484 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/export-test/export-1732255536879/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/.snapshotinfo 2024-11-22T06:05:52,484 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/export-test/export-1732255536879/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/data.manifest 2024-11-22T06:05:52,490 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithSkipTmp 2024-11-22T06:05:52,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] procedure2.ProcedureExecutor(1139): Stored pid=204, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-22T06:05:52,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=204 2024-11-22T06:05:52,492 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732255552492"}]},"ts":"1732255552492"} 2024-11-22T06:05:52,494 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=DISABLING in hbase:meta 2024-11-22T06:05:52,494 INFO 
[PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithSkipTmp to state=DISABLING 2024-11-22T06:05:52,494 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=205, ppid=204, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSkipTmp}] 2024-11-22T06:05:52,496 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=206, ppid=205, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=70cc174be652d64ec07f1005eaa473af, UNASSIGN}, {pid=207, ppid=205, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=11b79421a8f4d2dc2c325ae22e6ed4f0, UNASSIGN}] 2024-11-22T06:05:52,497 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=206, ppid=205, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=70cc174be652d64ec07f1005eaa473af, UNASSIGN 2024-11-22T06:05:52,497 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=207, ppid=205, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=11b79421a8f4d2dc2c325ae22e6ed4f0, UNASSIGN 2024-11-22T06:05:52,497 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=206 updating hbase:meta row=70cc174be652d64ec07f1005eaa473af, regionState=CLOSING, regionLocation=407411ea95db,44619,1732255321573 2024-11-22T06:05:52,497 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=207 updating hbase:meta row=11b79421a8f4d2dc2c325ae22e6ed4f0, regionState=CLOSING, regionLocation=407411ea95db,35283,1732255321784 2024-11-22T06:05:52,499 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=206, ppid=205, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=70cc174be652d64ec07f1005eaa473af, UNASSIGN because future has completed 2024-11-22T06:05:52,499 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-22T06:05:52,499 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=208, ppid=206, state=RUNNABLE, hasLock=false; CloseRegionProcedure 70cc174be652d64ec07f1005eaa473af, server=407411ea95db,44619,1732255321573}] 2024-11-22T06:05:52,500 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=207, ppid=205, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=11b79421a8f4d2dc2c325ae22e6ed4f0, UNASSIGN because future has completed 2024-11-22T06:05:52,500 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-22T06:05:52,500 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=209, ppid=207, state=RUNNABLE, hasLock=false; CloseRegionProcedure 11b79421a8f4d2dc2c325ae22e6ed4f0, 
server=407411ea95db,35283,1732255321784}] 2024-11-22T06:05:52,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=204 2024-11-22T06:05:52,652 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] handler.UnassignRegionHandler(122): Close 70cc174be652d64ec07f1005eaa473af 2024-11-22T06:05:52,652 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-22T06:05:52,652 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] regionserver.HRegion(1722): Closing 70cc174be652d64ec07f1005eaa473af, disabling compactions & flushes 2024-11-22T06:05:52,652 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,,1732255535207.70cc174be652d64ec07f1005eaa473af. 2024-11-22T06:05:52,652 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,,1732255535207.70cc174be652d64ec07f1005eaa473af. 2024-11-22T06:05:52,652 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,,1732255535207.70cc174be652d64ec07f1005eaa473af. after waiting 0 ms 2024-11-22T06:05:52,652 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,,1732255535207.70cc174be652d64ec07f1005eaa473af. 2024-11-22T06:05:52,652 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] handler.UnassignRegionHandler(122): Close 11b79421a8f4d2dc2c325ae22e6ed4f0 2024-11-22T06:05:52,652 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-22T06:05:52,653 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] regionserver.HRegion(1722): Closing 11b79421a8f4d2dc2c325ae22e6ed4f0, disabling compactions & flushes 2024-11-22T06:05:52,653 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,1,1732255535207.11b79421a8f4d2dc2c325ae22e6ed4f0. 2024-11-22T06:05:52,653 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1732255535207.11b79421a8f4d2dc2c325ae22e6ed4f0. 2024-11-22T06:05:52,653 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1732255535207.11b79421a8f4d2dc2c325ae22e6ed4f0. 
after waiting 0 ms 2024-11-22T06:05:52,653 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,1,1732255535207.11b79421a8f4d2dc2c325ae22e6ed4f0. 2024-11-22T06:05:52,656 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithSkipTmp/11b79421a8f4d2dc2c325ae22e6ed4f0/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-22T06:05:52,656 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-22T06:05:52,656 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,1,1732255535207.11b79421a8f4d2dc2c325ae22e6ed4f0. 2024-11-22T06:05:52,656 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithSkipTmp/70cc174be652d64ec07f1005eaa473af/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-22T06:05:52,657 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] regionserver.HRegion(1676): Region close journal for 11b79421a8f4d2dc2c325ae22e6ed4f0: Waiting for close lock at 1732255552652Running coprocessor pre-close hooks at 1732255552652Disabling compacts and flushes for region at 1732255552652Disabling writes for close at 1732255552653 (+1 ms)Writing region close event to WAL at 1732255552653Running coprocessor post-close hooks at 1732255552656 (+3 ms)Closed at 1732255552656 2024-11-22T06:05:52,657 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-22T06:05:52,657 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,,1732255535207.70cc174be652d64ec07f1005eaa473af. 
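The entries at the top of this block record ExportSnapshot(1227) reporting the export of snaptb0-testExportFileSystemStateWithSkipTmp as completed and TestExportSnapshot verifying the .snapshotinfo and data.manifest files in both the source and export locations. For orientation, here is a minimal sketch of how such an export is typically driven through Hadoop's ToolRunner; the copy-to URI and the skip-tmp property name are illustrative assumptions, not values taken from this run:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Assumption: "snapshot.export.skip.tmp" is the property behind the
    // "SkipTmp" variant of this test; it makes the export write directly
    // into the target .hbase-snapshot directory instead of a temporary dir.
    conf.setBoolean("snapshot.export.skip.tmp", true);

    // Snapshot name matches the log above; the destination URI is hypothetical.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportFileSystemStateWithSkipTmp",
        "-copy-to", "hdfs://localhost:33847/user/jenkins/export-target"
    });
    System.exit(rc);
  }
}
```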
2024-11-22T06:05:52,657 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] regionserver.HRegion(1676): Region close journal for 70cc174be652d64ec07f1005eaa473af: Waiting for close lock at 1732255552652Running coprocessor pre-close hooks at 1732255552652Disabling compacts and flushes for region at 1732255552652Disabling writes for close at 1732255552652Writing region close event to WAL at 1732255552653 (+1 ms)Running coprocessor post-close hooks at 1732255552657 (+4 ms)Closed at 1732255552657 2024-11-22T06:05:52,658 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] handler.UnassignRegionHandler(157): Closed 11b79421a8f4d2dc2c325ae22e6ed4f0 2024-11-22T06:05:52,659 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=207 updating hbase:meta row=11b79421a8f4d2dc2c325ae22e6ed4f0, regionState=CLOSED 2024-11-22T06:05:52,659 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] handler.UnassignRegionHandler(157): Closed 70cc174be652d64ec07f1005eaa473af 2024-11-22T06:05:52,660 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=206 updating hbase:meta row=70cc174be652d64ec07f1005eaa473af, regionState=CLOSED 2024-11-22T06:05:52,660 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=209, ppid=207, state=RUNNABLE, hasLock=false; CloseRegionProcedure 11b79421a8f4d2dc2c325ae22e6ed4f0, server=407411ea95db,35283,1732255321784 because future has completed 2024-11-22T06:05:52,661 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=208, ppid=206, state=RUNNABLE, hasLock=false; CloseRegionProcedure 70cc174be652d64ec07f1005eaa473af, server=407411ea95db,44619,1732255321573 because future has completed 2024-11-22T06:05:52,662 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=209, resume processing ppid=207 2024-11-22T06:05:52,663 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=208, resume processing ppid=206 2024-11-22T06:05:52,663 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=209, ppid=207, state=SUCCESS, hasLock=false; CloseRegionProcedure 11b79421a8f4d2dc2c325ae22e6ed4f0, server=407411ea95db,35283,1732255321784 in 161 msec 2024-11-22T06:05:52,663 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=208, ppid=206, state=SUCCESS, hasLock=false; CloseRegionProcedure 70cc174be652d64ec07f1005eaa473af, server=407411ea95db,44619,1732255321573 in 162 msec 2024-11-22T06:05:52,663 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=207, ppid=205, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=11b79421a8f4d2dc2c325ae22e6ed4f0, UNASSIGN in 166 msec 2024-11-22T06:05:52,664 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=206, resume processing ppid=205 2024-11-22T06:05:52,664 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=206, ppid=205, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=70cc174be652d64ec07f1005eaa473af, UNASSIGN in 167 msec 2024-11-22T06:05:52,666 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=205, resume processing ppid=204 2024-11-22T06:05:52,666 INFO [PEWorker-5 {}] 
procedure2.ProcedureExecutor(1521): Finished pid=205, ppid=204, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 171 msec 2024-11-22T06:05:52,667 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732255552666"}]},"ts":"1732255552666"} 2024-11-22T06:05:52,668 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=DISABLED in hbase:meta 2024-11-22T06:05:52,668 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithSkipTmp to state=DISABLED 2024-11-22T06:05:52,669 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=204, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 178 msec 2024-11-22T06:05:52,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=204 2024-11-22T06:05:52,809 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-11-22T06:05:52,809 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithSkipTmp 2024-11-22T06:05:52,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] procedure2.ProcedureExecutor(1139): Stored pid=210, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-22T06:05:52,811 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=210, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-22T06:05:52,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithSkipTmp 2024-11-22T06:05:52,812 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=210, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-22T06:05:52,814 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41799 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithSkipTmp 2024-11-22T06:05:52,815 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithSkipTmp/70cc174be652d64ec07f1005eaa473af 2024-11-22T06:05:52,815 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithSkipTmp/11b79421a8f4d2dc2c325ae22e6ed4f0 2024-11-22T06:05:52,817 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(159): Archiving [FileablePath, 
hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithSkipTmp/70cc174be652d64ec07f1005eaa473af/cf, FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithSkipTmp/70cc174be652d64ec07f1005eaa473af/recovered.edits] 2024-11-22T06:05:52,817 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithSkipTmp/11b79421a8f4d2dc2c325ae22e6ed4f0/cf, FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithSkipTmp/11b79421a8f4d2dc2c325ae22e6ed4f0/recovered.edits] 2024-11-22T06:05:52,820 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithSkipTmp/11b79421a8f4d2dc2c325ae22e6ed4f0/cf/b2d596265ec24efaa32cb338d662bf48 to hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/11b79421a8f4d2dc2c325ae22e6ed4f0/cf/b2d596265ec24efaa32cb338d662bf48 2024-11-22T06:05:52,820 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithSkipTmp/70cc174be652d64ec07f1005eaa473af/cf/9aebab52c67548fd92e0d3c326af392f to hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/70cc174be652d64ec07f1005eaa473af/cf/9aebab52c67548fd92e0d3c326af392f 2024-11-22T06:05:52,822 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithSkipTmp/70cc174be652d64ec07f1005eaa473af/recovered.edits/9.seqid to hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/70cc174be652d64ec07f1005eaa473af/recovered.edits/9.seqid 2024-11-22T06:05:52,822 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithSkipTmp/11b79421a8f4d2dc2c325ae22e6ed4f0/recovered.edits/9.seqid to hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/11b79421a8f4d2dc2c325ae22e6ed4f0/recovered.edits/9.seqid 2024-11-22T06:05:52,822 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithSkipTmp/11b79421a8f4d2dc2c325ae22e6ed4f0 2024-11-22T06:05:52,822 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testtb-testExportFileSystemStateWithSkipTmp/70cc174be652d64ec07f1005eaa473af 2024-11-22T06:05:52,822 DEBUG [PEWorker-2 {}] 
procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithSkipTmp regions 2024-11-22T06:05:52,824 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=210, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-22T06:05:52,826 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithSkipTmp from hbase:meta 2024-11-22T06:05:53,084 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-22T06:05:53,084 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44619-0x101626c19a90001, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-22T06:05:53,084 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41799-0x101626c19a90002, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-22T06:05:53,084 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35283-0x101626c19a90003, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-22T06:05:53,085 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-11-22T06:05:53,085 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-11-22T06:05:53,085 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-11-22T06:05:53,085 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-11-22T06:05:53,085 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithSkipTmp' descriptor. 2024-11-22T06:05:53,086 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=210, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-22T06:05:53,087 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithSkipTmp' from region states. 
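The procedure entries above, together with the snapshot deletions a little further down, form the usual end-of-test cleanup: DisableTableProcedure (pid=204) unassigns and closes both regions, DeleteTableProcedure (pid=210) archives the region directories, clears hbase:meta, and drops the /hbase/acl znode, and the SnapshotManager then deletes both snapshots. A minimal sketch of the client-side Admin calls behind such a sequence, assuming a plain synchronous Admin handle (the test itself goes through the async admin, as the RawAsyncHBaseAdmin entries show):

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class SnapshotTestCleanupSketch {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Triggers DisableTableProcedure and its region-close subprocedures.
      admin.disableTable(table);
      // Triggers DeleteTableProcedure: region dirs archived, meta rows and
      // ACL entries removed, as logged above.
      admin.deleteTable(table);
      // Snapshot deletions, matching the SnapshotManager entries below.
      admin.deleteSnapshot("emptySnaptb0-testExportFileSystemStateWithSkipTmp");
      admin.deleteSnapshot("snaptb0-testExportFileSystemStateWithSkipTmp");
    }
  }
}
```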
2024-11-22T06:05:53,087 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp,,1732255535207.70cc174be652d64ec07f1005eaa473af.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732255553087"}]},"ts":"9223372036854775807"} 2024-11-22T06:05:53,087 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp,1,1732255535207.11b79421a8f4d2dc2c325ae22e6ed4f0.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732255553087"}]},"ts":"9223372036854775807"} 2024-11-22T06:05:53,089 INFO [PEWorker-2 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-22T06:05:53,089 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 70cc174be652d64ec07f1005eaa473af, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1732255535207.70cc174be652d64ec07f1005eaa473af.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 11b79421a8f4d2dc2c325ae22e6ed4f0, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1732255535207.11b79421a8f4d2dc2c325ae22e6ed4f0.', STARTKEY => '1', ENDKEY => ''}] 2024-11-22T06:05:53,089 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithSkipTmp' as deleted. 2024-11-22T06:05:53,089 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732255553089"}]},"ts":"9223372036854775807"} 2024-11-22T06:05:53,090 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithSkipTmp state from META 2024-11-22T06:05:53,091 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=210, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-22T06:05:53,092 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=210, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 282 msec 2024-11-22T06:05:53,226 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44619-0x101626c19a90001, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-22T06:05:53,226 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35283-0x101626c19a90003, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-22T06:05:53,226 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-22T06:05:53,226 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41799-0x101626c19a90002, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-22T06:05:53,226 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35283-0x101626c19a90003, quorum=127.0.0.1:59716, 
baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:05:53,226 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44619-0x101626c19a90001, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:05:53,226 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41799-0x101626c19a90002, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:05:53,226 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-22T06:05:53,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=210 2024-11-22T06:05:53,227 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-22T06:05:53,227 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-22T06:05:53,227 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithSkipTmp 2024-11-22T06:05:53,227 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-22T06:05:53,227 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-11-22T06:05:53,227 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-22T06:05:53,232 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithSkipTmp" type: DISABLED 2024-11-22T06:05:53,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-22T06:05:53,234 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithSkipTmp" type: DISABLED 2024-11-22T06:05:53,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-22T06:05:53,253 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithSkipTmp 
Thread=813 (was 807) Potentially hanging thread: HFileArchiver-22 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1695979335_1 at /127.0.0.1:50174 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #17 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 138092) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34411 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2145612548_22 at /127.0.0.1:42886 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2145612548_22 at /127.0.0.1:37426 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #18 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1113730973) connection to localhost/127.0.0.1:34411 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Thread-7133 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: HFileArchiver-21 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1695979335_1 at /127.0.0.1:41550 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2145612548_22 at /127.0.0.1:46390 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=805 (was 807), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=587 (was 647), ProcessCount=16 (was 17), AvailableMemoryMB=4645 (was 4457) - AvailableMemoryMB LEAK? - 2024-11-22T06:05:53,253 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=813 is superior to 500 2024-11-22T06:05:53,253 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2377): Stopping mini mapreduce cluster... 
2024-11-22T06:05:53,258 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4a2c53a0{node,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-11-22T06:05:53,260 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7a18dd6d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T06:05:53,260 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T06:05:53,261 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7c4deda7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-11-22T06:05:53,261 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@47b809eb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/hadoop.log.dir/,STOPPED} 2024-11-22T06:05:56,824 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732255328437_0010_000001 (auth:SIMPLE) from 127.0.0.1:37314 2024-11-22T06:05:56,838 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1754703613/yarn-5946224054/MiniMRCluster_1754703613-localDir-nm-0_3/usercache/jenkins/appcache/application_1732255328437_0010/container_1732255328437_0010_01_000001/launch_container.sh] 2024-11-22T06:05:56,838 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1754703613/yarn-5946224054/MiniMRCluster_1754703613-localDir-nm-0_3/usercache/jenkins/appcache/application_1732255328437_0010/container_1732255328437_0010_01_000001/container_tokens] 2024-11-22T06:05:56,839 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1754703613/yarn-5946224054/MiniMRCluster_1754703613-localDir-nm-0_3/usercache/jenkins/appcache/application_1732255328437_0010/container_1732255328437_0010_01_000001/sysfs] 2024-11-22T06:05:58,180 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-22T06:05:59,722 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-22T06:06:01,126 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp 2024-11-22T06:06:03,370 WARN [regionserver/407411ea95db:0.Chore.1 {}] hbase.ExecutorStatusChore(69): RS_COMPACTED_FILES_DISCHARGER's size info, queued: 5, running: 1 2024-11-22T06:06:06,631 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-22T06:06:10,279 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@639863e9{node,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-11-22T06:06:10,279 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@57cb55f8{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T06:06:10,279 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T06:06:10,279 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5fd06f3c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-11-22T06:06:10,279 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@54218519{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/hadoop.log.dir/,STOPPED} 2024-11-22T06:06:27,296 ERROR [Thread[Thread-402,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-11-22T06:06:27,296 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6d75379e{cluster,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/cluster} 2024-11-22T06:06:27,297 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@96ef0{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T06:06:27,297 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T06:06:27,297 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@35a8bd20{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-11-22T06:06:27,298 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7147d60{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/hadoop.log.dir/,STOPPED} 2024-11-22T06:06:27,301 WARN [ApplicationMaster Launcher {}] amlauncher.ApplicationMasterLauncher$LauncherThread(122): org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher$LauncherThread interrupted. Returning. 
2024-11-22T06:06:27,304 ERROR [SchedulerEventDispatcher:Event Processor {}] event.EventDispatcher$EventProcessor(72): Returning, interrupted : java.lang.InterruptedException 2024-11-22T06:06:27,304 ERROR [ResourceManager Event Processor Monitor {}] resourcemanager.ResourceManager$SchedulerEventDispatcher$EventProcessorMonitor(1193): Returning, interrupted : java.lang.InterruptedException: sleep interrupted 2024-11-22T06:06:27,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741830_1006 (size=968218) 2024-11-22T06:06:27,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741830_1006 (size=968218) 2024-11-22T06:06:27,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741830_1006 (size=968218) 2024-11-22T06:06:27,310 ERROR [Thread[Thread-425,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-11-22T06:06:27,312 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@61cf6e04{jobhistory,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/jobhistory} 2024-11-22T06:06:27,312 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@63fe27c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T06:06:27,312 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T06:06:27,313 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@68345ece{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-11-22T06:06:27,313 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2257d98e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/hadoop.log.dir/,STOPPED} 2024-11-22T06:06:27,314 ERROR [Thread[Thread-384,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-11-22T06:06:27,314 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2380): Mini mapreduce cluster stopped 2024-11-22T06:06:27,314 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-22T06:06:27,314 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-22T06:06:27,314 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T06:06:27,314 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:06:27,314 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:06:27,314 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
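The call stack above shows the teardown path: TestExportSnapshot.tearDownAfterClass shutting down the minicluster through HBaseTestingUtil, right after the mini MapReduce cluster was stopped. A minimal sketch of that teardown shape, assuming HBaseTestingUtil exposes shutdownMiniMapReduceCluster and shutdownMiniCluster as the log entries suggest, and that a single shared TEST_UTIL instance was created in the matching setup:

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.AfterClass;

public class SnapshotTestTeardownSketch {
  // Assumption: one shared testing utility, created in the @BeforeClass setup.
  private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    // "Stopping mini mapreduce cluster..." / "Mini mapreduce cluster stopped"
    TEST_UTIL.shutdownMiniMapReduceCluster();
    // "Shutting down minicluster": stops the HBase master, region servers,
    // ZooKeeper, and the backing DFS minicluster.
    TEST_UTIL.shutdownMiniCluster();
  }
}
```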
2024-11-22T06:06:27,314 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-22T06:06:27,314 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=2040249828, stopped=false 2024-11-22T06:06:27,315 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-22T06:06:27,315 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-11-22T06:06:27,315 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=407411ea95db,40429,1732255320691 2024-11-22T06:06:27,361 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41799-0x101626c19a90002, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T06:06:27,361 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44619-0x101626c19a90001, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T06:06:27,361 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T06:06:27,361 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35283-0x101626c19a90003, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T06:06:27,361 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41799-0x101626c19a90002, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T06:06:27,361 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44619-0x101626c19a90001, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T06:06:27,361 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T06:06:27,361 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35283-0x101626c19a90003, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T06:06:27,361 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-22T06:06:27,362 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35283-0x101626c19a90003, quorum=127.0.0.1:59716, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T06:06:27,362 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41799-0x101626c19a90002, quorum=127.0.0.1:59716, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T06:06:27,362 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44619-0x101626c19a90001, quorum=127.0.0.1:59716, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T06:06:27,362 INFO 
[RS:0;407411ea95db:44619 {}] regionserver.HRegionServer(878): Closing user regions 2024-11-22T06:06:27,363 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-22T06:06:27,363 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T06:06:27,363 INFO [RS:0;407411ea95db:44619 {}] regionserver.HRegionServer(3091): Received CLOSE for bcf300d151c9df6bfc8b093b51457a6c 2024-11-22T06:06:27,363 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T06:06:27,363 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:06:27,365 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '407411ea95db,44619,1732255321573' ***** 2024-11-22T06:06:27,365 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing bcf300d151c9df6bfc8b093b51457a6c, disabling compactions & flushes 2024-11-22T06:06:27,365 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 
{event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,,1732255457883.bcf300d151c9df6bfc8b093b51457a6c. 2024-11-22T06:06:27,365 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,,1732255457883.bcf300d151c9df6bfc8b093b51457a6c. 2024-11-22T06:06:27,365 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,,1732255457883.bcf300d151c9df6bfc8b093b51457a6c. after waiting 0 ms 2024-11-22T06:06:27,365 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-22T06:06:27,365 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,,1732255457883.bcf300d151c9df6bfc8b093b51457a6c. 2024-11-22T06:06:27,365 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-22T06:06:27,365 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '407411ea95db,41799,1732255321718' ***** 2024-11-22T06:06:27,365 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-22T06:06:27,365 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-22T06:06:27,366 INFO [RS:1;407411ea95db:41799 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-22T06:06:27,366 INFO [RS:0;407411ea95db:44619 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-22T06:06:27,366 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '407411ea95db,35283,1732255321784' ***** 2024-11-22T06:06:27,366 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-22T06:06:27,366 INFO [RS:0;407411ea95db:44619 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-22T06:06:27,366 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-22T06:06:27,366 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-22T06:06:27,366 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-22T06:06:27,366 INFO [RS:0;407411ea95db:44619 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-22T06:06:27,366 INFO [RS:1;407411ea95db:41799 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-22T06:06:27,366 INFO [RS:1;407411ea95db:41799 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-11-22T06:06:27,367 INFO [RS:0;407411ea95db:44619 {}] regionserver.HRegionServer(959): stopping server 407411ea95db,44619,1732255321573 2024-11-22T06:06:27,367 INFO [RS:1;407411ea95db:41799 {}] regionserver.HRegionServer(3091): Received CLOSE for c2a1af531af5dfa747044be7409c1d27 2024-11-22T06:06:27,367 INFO [RS:0;407411ea95db:44619 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-22T06:06:27,367 INFO [RS:2;407411ea95db:35283 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-22T06:06:27,367 INFO [RS:0;407411ea95db:44619 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;407411ea95db:44619. 2024-11-22T06:06:27,367 INFO [RS:1;407411ea95db:41799 {}] regionserver.HRegionServer(3091): Received CLOSE for c1407c3a5417180f7dcb0e325387e8f9 2024-11-22T06:06:27,367 INFO [RS:2;407411ea95db:35283 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-22T06:06:27,367 INFO [RS:1;407411ea95db:41799 {}] regionserver.HRegionServer(959): stopping server 407411ea95db,41799,1732255321718 2024-11-22T06:06:27,367 INFO [RS:2;407411ea95db:35283 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-22T06:06:27,367 DEBUG [RS:0;407411ea95db:44619 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T06:06:27,367 INFO [RS:1;407411ea95db:41799 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-22T06:06:27,367 INFO [RS:2;407411ea95db:35283 {}] regionserver.HRegionServer(959): stopping server 407411ea95db,35283,1732255321784 2024-11-22T06:06:27,367 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing c2a1af531af5dfa747044be7409c1d27, disabling compactions & flushes 2024-11-22T06:06:27,367 DEBUG [RS:0;407411ea95db:44619 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:06:27,367 INFO [RS:2;407411ea95db:35283 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-22T06:06:27,367 INFO [RS:1;407411ea95db:41799 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;407411ea95db:41799. 
2024-11-22T06:06:27,367 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region hbase:acl,,1732255324607.c2a1af531af5dfa747044be7409c1d27. 2024-11-22T06:06:27,367 INFO [RS:2;407411ea95db:35283 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;407411ea95db:35283. 2024-11-22T06:06:27,367 DEBUG [RS:1;407411ea95db:41799 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T06:06:27,367 INFO [regionserver/407411ea95db:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-22T06:06:27,367 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:acl,,1732255324607.c2a1af531af5dfa747044be7409c1d27. 
2024-11-22T06:06:27,367 DEBUG [RS:1;407411ea95db:41799 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:06:27,367 DEBUG [RS:2;407411ea95db:35283 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T06:06:27,367 INFO [RS:0;407411ea95db:44619 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-22T06:06:27,367 DEBUG [RS:2;407411ea95db:35283 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:06:27,367 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on hbase:acl,,1732255324607.c2a1af531af5dfa747044be7409c1d27. after waiting 0 ms 2024-11-22T06:06:27,367 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region hbase:acl,,1732255324607.c2a1af531af5dfa747044be7409c1d27. 2024-11-22T06:06:27,367 DEBUG [RS:0;407411ea95db:44619 {}] regionserver.HRegionServer(1325): Online Regions={bcf300d151c9df6bfc8b093b51457a6c=testExportExpiredSnapshot,,1732255457883.bcf300d151c9df6bfc8b093b51457a6c.} 2024-11-22T06:06:27,367 INFO [regionserver/407411ea95db:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-22T06:06:27,367 INFO [RS:1;407411ea95db:41799 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-22T06:06:27,367 INFO [RS:1;407411ea95db:41799 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-22T06:06:27,367 INFO [RS:2;407411ea95db:35283 {}] regionserver.HRegionServer(976): stopping server 407411ea95db,35283,1732255321784; all regions closed. 2024-11-22T06:06:27,367 INFO [RS:1;407411ea95db:41799 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-22T06:06:27,368 INFO [RS:1;407411ea95db:41799 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-22T06:06:27,368 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-22T06:06:27,368 DEBUG [RS:0;407411ea95db:44619 {}] regionserver.HRegionServer(1351): Waiting on bcf300d151c9df6bfc8b093b51457a6c 2024-11-22T06:06:27,368 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing c2a1af531af5dfa747044be7409c1d27 1/1 column families, dataSize=1.38 KB heapSize=3.33 KB 2024-11-22T06:06:27,369 INFO [RS:1;407411ea95db:41799 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-11-22T06:06:27,369 DEBUG [RS:1;407411ea95db:41799 {}] regionserver.HRegionServer(1325): Online Regions={c2a1af531af5dfa747044be7409c1d27=hbase:acl,,1732255324607.c2a1af531af5dfa747044be7409c1d27., 1588230740=hbase:meta,,1.1588230740, c1407c3a5417180f7dcb0e325387e8f9=testExportExpiredSnapshot,1,1732255457883.c1407c3a5417180f7dcb0e325387e8f9.} 2024-11-22T06:06:27,369 DEBUG [RS:1;407411ea95db:41799 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, c1407c3a5417180f7dcb0e325387e8f9, c2a1af531af5dfa747044be7409c1d27 2024-11-22T06:06:27,369 DEBUG [RS_CLOSE_META-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-22T06:06:27,369 INFO [RS_CLOSE_META-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-22T06:06:27,369 DEBUG [RS_CLOSE_META-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-22T06:06:27,369 DEBUG [RS_CLOSE_META-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-22T06:06:27,369 DEBUG [RS_CLOSE_META-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-22T06:06:27,369 INFO [RS_CLOSE_META-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=69.66 KB heapSize=111.04 KB 2024-11-22T06:06:27,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741835_1011 (size=11026) 2024-11-22T06:06:27,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741835_1011 (size=11026) 2024-11-22T06:06:27,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741835_1011 (size=11026) 2024-11-22T06:06:27,375 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testExportExpiredSnapshot/bcf300d151c9df6bfc8b093b51457a6c/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-22T06:06:27,375 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-22T06:06:27,375 INFO 
[RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,,1732255457883.bcf300d151c9df6bfc8b093b51457a6c. 2024-11-22T06:06:27,375 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for bcf300d151c9df6bfc8b093b51457a6c: Waiting for close lock at 1732255587364Running coprocessor pre-close hooks at 1732255587365 (+1 ms)Disabling compacts and flushes for region at 1732255587365Disabling writes for close at 1732255587365Writing region close event to WAL at 1732255587369 (+4 ms)Running coprocessor post-close hooks at 1732255587375 (+6 ms)Closed at 1732255587375 2024-11-22T06:06:27,376 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testExportExpiredSnapshot,,1732255457883.bcf300d151c9df6bfc8b093b51457a6c. 2024-11-22T06:06:27,379 DEBUG [RS:2;407411ea95db:35283 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/oldWALs 2024-11-22T06:06:27,379 INFO [RS:2;407411ea95db:35283 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 407411ea95db%2C35283%2C1732255321784:(num 1732255323550) 2024-11-22T06:06:27,379 DEBUG [RS:2;407411ea95db:35283 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:06:27,379 INFO [RS:2;407411ea95db:35283 {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T06:06:27,379 INFO [RS:2;407411ea95db:35283 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-22T06:06:27,380 INFO [RS:2;407411ea95db:35283 {}] hbase.ChoreService(370): Chore service for: regionserver/407411ea95db:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-22T06:06:27,380 INFO [RS:2;407411ea95db:35283 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-22T06:06:27,380 INFO [RS:2;407411ea95db:35283 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-22T06:06:27,380 INFO [RS:2;407411ea95db:35283 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-22T06:06:27,380 INFO [RS:2;407411ea95db:35283 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-22T06:06:27,380 INFO [regionserver/407411ea95db:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-22T06:06:27,380 INFO [RS:2;407411ea95db:35283 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35283 2024-11-22T06:06:27,393 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T06:06:27,393 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35283-0x101626c19a90003, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/407411ea95db,35283,1732255321784 2024-11-22T06:06:27,393 INFO [RS:2;407411ea95db:35283 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-22T06:06:27,393 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/hbase/acl/c2a1af531af5dfa747044be7409c1d27/.tmp/l/df278ac719b546b684076c04cfcf5209 is 74, key is testtb-testExportFileSystemStateWithMergeRegion-1/l:/1732255455933/DeleteFamily/seqid=0 2024-11-22T06:06:27,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742347_1523 (size=5695) 2024-11-22T06:06:27,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742347_1523 (size=5695) 2024-11-22T06:06:27,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742347_1523 (size=5695) 2024-11-22T06:06:27,399 DEBUG [RS_CLOSE_META-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/hbase/meta/1588230740/.tmp/info/3319498044094a30a562ab0386ae5cd7 is 173, key is testExportExpiredSnapshot,1,1732255457883.c1407c3a5417180f7dcb0e325387e8f9./info:regioninfo/1732255458241/Put/seqid=0 2024-11-22T06:06:27,402 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.38 KB at sequenceid=27 (bloomFilter=false), to=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/hbase/acl/c2a1af531af5dfa747044be7409c1d27/.tmp/l/df278ac719b546b684076c04cfcf5209 2024-11-22T06:06:27,402 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [407411ea95db,35283,1732255321784] 2024-11-22T06:06:27,403 INFO [regionserver/407411ea95db:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-22T06:06:27,403 INFO [regionserver/407411ea95db:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-22T06:06:27,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742348_1524 (size=14362) 2024-11-22T06:06:27,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742348_1524 (size=14362) 2024-11-22T06:06:27,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742348_1524 (size=14362) 
2024-11-22T06:06:27,406 INFO [RS_CLOSE_META-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=59.12 KB at sequenceid=199 (bloomFilter=true), to=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/hbase/meta/1588230740/.tmp/info/3319498044094a30a562ab0386ae5cd7 2024-11-22T06:06:27,407 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for df278ac719b546b684076c04cfcf5209 2024-11-22T06:06:27,408 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/hbase/acl/c2a1af531af5dfa747044be7409c1d27/.tmp/l/df278ac719b546b684076c04cfcf5209 as hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/hbase/acl/c2a1af531af5dfa747044be7409c1d27/l/df278ac719b546b684076c04cfcf5209 2024-11-22T06:06:27,410 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/407411ea95db,35283,1732255321784 already deleted, retry=false 2024-11-22T06:06:27,410 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 407411ea95db,35283,1732255321784 expired; onlineServers=2 2024-11-22T06:06:27,412 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for df278ac719b546b684076c04cfcf5209 2024-11-22T06:06:27,412 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/hbase/acl/c2a1af531af5dfa747044be7409c1d27/l/df278ac719b546b684076c04cfcf5209, entries=12, sequenceid=27, filesize=5.6 K 2024-11-22T06:06:27,413 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.38 KB/1412, heapSize ~3.31 KB/3392, currentSize=0 B/0 for c2a1af531af5dfa747044be7409c1d27 in 46ms, sequenceid=27, compaction requested=false 2024-11-22T06:06:27,419 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/hbase/acl/c2a1af531af5dfa747044be7409c1d27/recovered.edits/30.seqid, newMaxSeqId=30, maxSeqId=1 2024-11-22T06:06:27,420 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-22T06:06:27,420 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed hbase:acl,,1732255324607.c2a1af531af5dfa747044be7409c1d27. 
2024-11-22T06:06:27,420 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for c2a1af531af5dfa747044be7409c1d27: Waiting for close lock at 1732255587367Running coprocessor pre-close hooks at 1732255587367Disabling compacts and flushes for region at 1732255587367Disabling writes for close at 1732255587367Obtaining lock to block concurrent updates at 1732255587368 (+1 ms)Preparing flush snapshotting stores in c2a1af531af5dfa747044be7409c1d27 at 1732255587368Finished memstore snapshotting hbase:acl,,1732255324607.c2a1af531af5dfa747044be7409c1d27., syncing WAL and waiting on mvcc, flushsize=dataSize=1412, getHeapSize=3392, getOffHeapSize=0, getCellsCount=23 at 1732255587369 (+1 ms)Flushing stores of hbase:acl,,1732255324607.c2a1af531af5dfa747044be7409c1d27. at 1732255587369Flushing c2a1af531af5dfa747044be7409c1d27/l: creating writer at 1732255587369Flushing c2a1af531af5dfa747044be7409c1d27/l: appending metadata at 1732255587393 (+24 ms)Flushing c2a1af531af5dfa747044be7409c1d27/l: closing flushed file at 1732255587393Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@10b78434: reopening flushed file at 1732255587407 (+14 ms)Finished flush of dataSize ~1.38 KB/1412, heapSize ~3.31 KB/3392, currentSize=0 B/0 for c2a1af531af5dfa747044be7409c1d27 in 46ms, sequenceid=27, compaction requested=false at 1732255587413 (+6 ms)Writing region close event to WAL at 1732255587415 (+2 ms)Running coprocessor post-close hooks at 1732255587420 (+5 ms)Closed at 1732255587420 2024-11-22T06:06:27,420 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:acl,,1732255324607.c2a1af531af5dfa747044be7409c1d27. 2024-11-22T06:06:27,420 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing c1407c3a5417180f7dcb0e325387e8f9, disabling compactions & flushes 2024-11-22T06:06:27,420 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,1,1732255457883.c1407c3a5417180f7dcb0e325387e8f9. 2024-11-22T06:06:27,420 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,1,1732255457883.c1407c3a5417180f7dcb0e325387e8f9. 2024-11-22T06:06:27,420 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,1,1732255457883.c1407c3a5417180f7dcb0e325387e8f9. after waiting 0 ms 2024-11-22T06:06:27,420 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,1,1732255457883.c1407c3a5417180f7dcb0e325387e8f9. 
2024-11-22T06:06:27,425 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/default/testExportExpiredSnapshot/c1407c3a5417180f7dcb0e325387e8f9/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-22T06:06:27,426 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-22T06:06:27,426 INFO [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,1,1732255457883.c1407c3a5417180f7dcb0e325387e8f9. 2024-11-22T06:06:27,426 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for c1407c3a5417180f7dcb0e325387e8f9: Waiting for close lock at 1732255587420Running coprocessor pre-close hooks at 1732255587420Disabling compacts and flushes for region at 1732255587420Disabling writes for close at 1732255587420Writing region close event to WAL at 1732255587423 (+3 ms)Running coprocessor post-close hooks at 1732255587426 (+3 ms)Closed at 1732255587426 2024-11-22T06:06:27,426 DEBUG [RS_CLOSE_REGION-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testExportExpiredSnapshot,1,1732255457883.c1407c3a5417180f7dcb0e325387e8f9. 2024-11-22T06:06:27,428 DEBUG [RS_CLOSE_META-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/hbase/meta/1588230740/.tmp/ns/ee96bf6ebaa14de0816652b76dd29af7 is 124, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1732255438221.ef394ae7b5140734d6b528266ee80f5b./ns:/1732255455946/DeleteFamily/seqid=0 2024-11-22T06:06:27,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742349_1525 (size=7779) 2024-11-22T06:06:27,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742349_1525 (size=7779) 2024-11-22T06:06:27,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742349_1525 (size=7779) 2024-11-22T06:06:27,432 INFO [RS_CLOSE_META-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.23 KB at sequenceid=199 (bloomFilter=true), to=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/hbase/meta/1588230740/.tmp/ns/ee96bf6ebaa14de0816652b76dd29af7 2024-11-22T06:06:27,438 INFO [regionserver/407411ea95db:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T06:06:27,442 INFO [regionserver/407411ea95db:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T06:06:27,442 INFO [regionserver/407411ea95db:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T06:06:27,448 DEBUG [RS_CLOSE_META-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/hbase/meta/1588230740/.tmp/rep_barrier/56d4a285494844a785426715ddfeb027 is 133, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1732255438221.ef394ae7b5140734d6b528266ee80f5b./rep_barrier:/1732255455946/DeleteFamily/seqid=0 2024-11-22T06:06:27,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742350_1526 (size=8005) 2024-11-22T06:06:27,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742350_1526 (size=8005) 2024-11-22T06:06:27,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742350_1526 (size=8005) 2024-11-22T06:06:27,452 INFO [RS_CLOSE_META-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.34 KB at sequenceid=199 (bloomFilter=true), to=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/hbase/meta/1588230740/.tmp/rep_barrier/56d4a285494844a785426715ddfeb027 2024-11-22T06:06:27,466 INFO [regionserver/407411ea95db:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-22T06:06:27,466 INFO [regionserver/407411ea95db:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-22T06:06:27,468 DEBUG [RS_CLOSE_META-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/hbase/meta/1588230740/.tmp/table/f029f1757ae941e6ae4b2c0d7cdfea3d is 127, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1732255438221.ef394ae7b5140734d6b528266ee80f5b./table:/1732255455946/DeleteFamily/seqid=0 2024-11-22T06:06:27,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742351_1527 (size=8758) 2024-11-22T06:06:27,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742351_1527 (size=8758) 2024-11-22T06:06:27,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742351_1527 (size=8758) 2024-11-22T06:06:27,473 INFO [RS_CLOSE_META-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=5.97 KB at sequenceid=199 (bloomFilter=true), to=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/hbase/meta/1588230740/.tmp/table/f029f1757ae941e6ae4b2c0d7cdfea3d 2024-11-22T06:06:27,476 DEBUG [RS_CLOSE_META-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/hbase/meta/1588230740/.tmp/info/3319498044094a30a562ab0386ae5cd7 as hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/hbase/meta/1588230740/info/3319498044094a30a562ab0386ae5cd7 2024-11-22T06:06:27,480 INFO [RS_CLOSE_META-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/hbase/meta/1588230740/info/3319498044094a30a562ab0386ae5cd7, entries=74, sequenceid=199, filesize=14.0 K 2024-11-22T06:06:27,481 DEBUG [RS_CLOSE_META-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/hbase/meta/1588230740/.tmp/ns/ee96bf6ebaa14de0816652b76dd29af7 as hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/hbase/meta/1588230740/ns/ee96bf6ebaa14de0816652b76dd29af7 2024-11-22T06:06:27,485 INFO [RS_CLOSE_META-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/hbase/meta/1588230740/ns/ee96bf6ebaa14de0816652b76dd29af7, entries=23, sequenceid=199, filesize=7.6 K 2024-11-22T06:06:27,486 DEBUG [RS_CLOSE_META-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/hbase/meta/1588230740/.tmp/rep_barrier/56d4a285494844a785426715ddfeb027 as hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/hbase/meta/1588230740/rep_barrier/56d4a285494844a785426715ddfeb027 2024-11-22T06:06:27,490 INFO [RS_CLOSE_META-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/hbase/meta/1588230740/rep_barrier/56d4a285494844a785426715ddfeb027, entries=21, sequenceid=199, filesize=7.8 K 2024-11-22T06:06:27,491 DEBUG [RS_CLOSE_META-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/hbase/meta/1588230740/.tmp/table/f029f1757ae941e6ae4b2c0d7cdfea3d as hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/hbase/meta/1588230740/table/f029f1757ae941e6ae4b2c0d7cdfea3d 2024-11-22T06:06:27,495 INFO [RS_CLOSE_META-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/hbase/meta/1588230740/table/f029f1757ae941e6ae4b2c0d7cdfea3d, entries=36, sequenceid=199, filesize=8.6 K 2024-11-22T06:06:27,495 INFO [RS_CLOSE_META-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~69.66 KB/71334, heapSize ~110.98 KB/113640, currentSize=0 B/0 for 1588230740 in 126ms, sequenceid=199, compaction requested=false 2024-11-22T06:06:27,499 DEBUG [RS_CLOSE_META-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/data/hbase/meta/1588230740/recovered.edits/202.seqid, newMaxSeqId=202, maxSeqId=1 2024-11-22T06:06:27,500 DEBUG [RS_CLOSE_META-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-22T06:06:27,500 DEBUG [RS_CLOSE_META-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_META}] 
coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-22T06:06:27,500 INFO [RS_CLOSE_META-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-22T06:06:27,500 DEBUG [RS_CLOSE_META-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732255587369Running coprocessor pre-close hooks at 1732255587369Disabling compacts and flushes for region at 1732255587369Disabling writes for close at 1732255587369Obtaining lock to block concurrent updates at 1732255587369Preparing flush snapshotting stores in 1588230740 at 1732255587369Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=71334, getHeapSize=113640, getOffHeapSize=0, getCellsCount=548 at 1732255587370 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1732255587370Flushing 1588230740/info: creating writer at 1732255587370Flushing 1588230740/info: appending metadata at 1732255587399 (+29 ms)Flushing 1588230740/info: closing flushed file at 1732255587399Flushing 1588230740/ns: creating writer at 1732255587410 (+11 ms)Flushing 1588230740/ns: appending metadata at 1732255587427 (+17 ms)Flushing 1588230740/ns: closing flushed file at 1732255587427Flushing 1588230740/rep_barrier: creating writer at 1732255587435 (+8 ms)Flushing 1588230740/rep_barrier: appending metadata at 1732255587447 (+12 ms)Flushing 1588230740/rep_barrier: closing flushed file at 1732255587447Flushing 1588230740/table: creating writer at 1732255587456 (+9 ms)Flushing 1588230740/table: appending metadata at 1732255587468 (+12 ms)Flushing 1588230740/table: closing flushed file at 1732255587468Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7623ac17: reopening flushed file at 1732255587476 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7029e0d9: reopening flushed file at 1732255587480 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@79700382: reopening flushed file at 1732255587485 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7d987e49: reopening flushed file at 1732255587490 (+5 ms)Finished flush of dataSize ~69.66 KB/71334, heapSize ~110.98 KB/113640, currentSize=0 B/0 for 1588230740 in 126ms, sequenceid=199, compaction requested=false at 1732255587495 (+5 ms)Writing region close event to WAL at 1732255587497 (+2 ms)Running coprocessor post-close hooks at 1732255587500 (+3 ms)Closed at 1732255587500 2024-11-22T06:06:27,500 DEBUG [RS_CLOSE_META-regionserver/407411ea95db:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-22T06:06:27,502 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35283-0x101626c19a90003, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T06:06:27,502 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35283-0x101626c19a90003, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T06:06:27,503 INFO [RS:2;407411ea95db:35283 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-22T06:06:27,503 INFO [RS:2;407411ea95db:35283 {}] regionserver.HRegionServer(1031): Exiting; stopping=407411ea95db,35283,1732255321784; zookeeper 
connection closed. 2024-11-22T06:06:27,503 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2f416202 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2f416202 2024-11-22T06:06:27,568 INFO [RS:0;407411ea95db:44619 {}] regionserver.HRegionServer(976): stopping server 407411ea95db,44619,1732255321573; all regions closed. 2024-11-22T06:06:27,569 INFO [RS:1;407411ea95db:41799 {}] regionserver.HRegionServer(976): stopping server 407411ea95db,41799,1732255321718; all regions closed. 2024-11-22T06:06:27,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741834_1010 (size=15785) 2024-11-22T06:06:27,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741834_1010 (size=15785) 2024-11-22T06:06:27,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741834_1010 (size=15785) 2024-11-22T06:06:27,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741836_1012 (size=81723) 2024-11-22T06:06:27,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741836_1012 (size=81723) 2024-11-22T06:06:27,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741836_1012 (size=81723) 2024-11-22T06:06:27,577 DEBUG [RS:0;407411ea95db:44619 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/oldWALs 2024-11-22T06:06:27,577 INFO [RS:0;407411ea95db:44619 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 407411ea95db%2C44619%2C1732255321573:(num 1732255323548) 2024-11-22T06:06:27,577 DEBUG [RS:0;407411ea95db:44619 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:06:27,577 INFO [RS:0;407411ea95db:44619 {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T06:06:27,577 DEBUG [RS:1;407411ea95db:41799 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/oldWALs 2024-11-22T06:06:27,577 INFO [RS:1;407411ea95db:41799 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 407411ea95db%2C41799%2C1732255321718.meta:.meta(num 1732255324163) 2024-11-22T06:06:27,577 INFO [RS:0;407411ea95db:44619 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-22T06:06:27,577 INFO [RS:0;407411ea95db:44619 {}] hbase.ChoreService(370): Chore service for: regionserver/407411ea95db:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-22T06:06:27,577 INFO [RS:0;407411ea95db:44619 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-22T06:06:27,578 INFO [RS:0;407411ea95db:44619 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-22T06:06:27,578 INFO [RS:0;407411ea95db:44619 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-22T06:06:27,578 INFO [regionserver/407411ea95db:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-22T06:06:27,578 INFO [RS:0;407411ea95db:44619 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-22T06:06:27,578 INFO [RS:0;407411ea95db:44619 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44619 2024-11-22T06:06:27,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073741833_1009 (size=10142) 2024-11-22T06:06:27,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073741833_1009 (size=10142) 2024-11-22T06:06:27,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073741833_1009 (size=10142) 2024-11-22T06:06:27,581 DEBUG [RS:1;407411ea95db:41799 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/oldWALs 2024-11-22T06:06:27,581 INFO [RS:1;407411ea95db:41799 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 407411ea95db%2C41799%2C1732255321718:(num 1732255323550) 2024-11-22T06:06:27,581 DEBUG [RS:1;407411ea95db:41799 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T06:06:27,581 INFO [RS:1;407411ea95db:41799 {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T06:06:27,581 INFO [RS:1;407411ea95db:41799 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-22T06:06:27,581 INFO [RS:1;407411ea95db:41799 {}] hbase.ChoreService(370): Chore service for: regionserver/407411ea95db:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-22T06:06:27,581 INFO [RS:1;407411ea95db:41799 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-22T06:06:27,581 INFO [regionserver/407411ea95db:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-22T06:06:27,582 INFO [RS:1;407411ea95db:41799 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41799 2024-11-22T06:06:27,602 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T06:06:27,602 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44619-0x101626c19a90001, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/407411ea95db,44619,1732255321573 2024-11-22T06:06:27,602 INFO [RS:0;407411ea95db:44619 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-22T06:06:27,610 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41799-0x101626c19a90002, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/407411ea95db,41799,1732255321718 2024-11-22T06:06:27,610 INFO [RS:1;407411ea95db:41799 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-22T06:06:27,619 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [407411ea95db,44619,1732255321573] 2024-11-22T06:06:27,635 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/407411ea95db,44619,1732255321573 already deleted, retry=false 2024-11-22T06:06:27,635 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 407411ea95db,44619,1732255321573 expired; onlineServers=1 2024-11-22T06:06:27,635 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [407411ea95db,41799,1732255321718] 2024-11-22T06:06:27,644 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/407411ea95db,41799,1732255321718 already deleted, retry=false 2024-11-22T06:06:27,644 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 407411ea95db,41799,1732255321718 expired; onlineServers=0 2024-11-22T06:06:27,644 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '407411ea95db,40429,1732255320691' ***** 2024-11-22T06:06:27,644 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-22T06:06:27,645 INFO [M:0;407411ea95db:40429 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-22T06:06:27,645 INFO [M:0;407411ea95db:40429 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-22T06:06:27,645 DEBUG [M:0;407411ea95db:40429 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-22T06:06:27,645 DEBUG [M:0;407411ea95db:40429 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-22T06:06:27,645 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-22T06:06:27,645 DEBUG [master/407411ea95db:0:becomeActiveMaster-HFileCleaner.small.0-1732255323100 {}] cleaner.HFileCleaner(306): Exit Thread[master/407411ea95db:0:becomeActiveMaster-HFileCleaner.small.0-1732255323100,5,FailOnTimeoutGroup] 2024-11-22T06:06:27,645 DEBUG [master/407411ea95db:0:becomeActiveMaster-HFileCleaner.large.0-1732255323099 {}] cleaner.HFileCleaner(306): Exit Thread[master/407411ea95db:0:becomeActiveMaster-HFileCleaner.large.0-1732255323099,5,FailOnTimeoutGroup] 2024-11-22T06:06:27,646 INFO [M:0;407411ea95db:40429 {}] hbase.ChoreService(370): Chore service for: master/407411ea95db:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-22T06:06:27,646 INFO [M:0;407411ea95db:40429 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-22T06:06:27,646 DEBUG [M:0;407411ea95db:40429 {}] master.HMaster(1795): Stopping service threads 2024-11-22T06:06:27,647 INFO [M:0;407411ea95db:40429 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-22T06:06:27,647 INFO [M:0;407411ea95db:40429 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-22T06:06:27,648 INFO [M:0;407411ea95db:40429 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-22T06:06:27,648 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-22T06:06:27,652 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-22T06:06:27,652 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T06:06:27,652 DEBUG [M:0;407411ea95db:40429 {}] zookeeper.ZKUtil(347): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-22T06:06:27,652 WARN [M:0;407411ea95db:40429 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-22T06:06:27,653 INFO [M:0;407411ea95db:40429 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/.lastflushedseqids 2024-11-22T06:06:27,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43209 is added to blk_1073742352_1528 (size=329) 2024-11-22T06:06:27,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35893 is added to blk_1073742352_1528 (size=329) 2024-11-22T06:06:27,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43345 is added to blk_1073742352_1528 (size=329) 2024-11-22T06:06:27,665 INFO [M:0;407411ea95db:40429 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-22T06:06:27,665 INFO [M:0;407411ea95db:40429 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-22T06:06:27,666 DEBUG 
[M:0;407411ea95db:40429 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-11-22T06:06:27,679 INFO [M:0;407411ea95db:40429 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-22T06:06:27,679 DEBUG [M:0;407411ea95db:40429 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-22T06:06:27,679 DEBUG [M:0;407411ea95db:40429 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-11-22T06:06:27,679 DEBUG [M:0;407411ea95db:40429 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-22T06:06:27,679 INFO [M:0;407411ea95db:40429 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=823.18 KB heapSize=987.66 KB
2024-11-22T06:06:27,679 ERROR [AsyncFSWAL-0-hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/MasterData-prefix:407411ea95db,40429,1732255320691 {}] server.NIOServerCnxnFactory(85): Thread Thread[AsyncFSWAL-0-hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/MasterData-prefix:407411ea95db,40429,1732255320691,5,FailOnTimeoutGroup] died
java.lang.NullPointerException: Cannot invoke "org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf.readableBytes()" because "this.buf" is null
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.buffered(FanOutOneBlockAsyncDFSOutput.java:414) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.append(AsyncProtobufLogWriter.java:134) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doAppend(AsyncFSWAL.java:181) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doAppend(AsyncFSWAL.java:100) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.appendEntry(AbstractFSWAL.java:1333) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.appendAndSync(AbstractFSWAL.java:1724) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.consume(AbstractFSWAL.java:1832) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-22T06:06:27,719 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44619-0x101626c19a90001, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-22T06:06:27,719 INFO [RS:0;407411ea95db:44619 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-22T06:06:27,719 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44619-0x101626c19a90001, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-22T06:06:27,719 INFO [RS:0;407411ea95db:44619 {}] regionserver.HRegionServer(1031): Exiting; stopping=407411ea95db,44619,1732255321573; zookeeper connection closed.
2024-11-22T06:06:27,719 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@69b7246d {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@69b7246d
2024-11-22T06:06:27,727 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41799-0x101626c19a90002, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-22T06:06:27,727 INFO [RS:1;407411ea95db:41799 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-22T06:06:27,727 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41799-0x101626c19a90002, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-22T06:06:27,727 INFO [RS:1;407411ea95db:41799 {}] regionserver.HRegionServer(1031): Exiting; stopping=407411ea95db,41799,1732255321718; zookeeper connection closed.
2024-11-22T06:06:27,728 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@34bcfed5 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@34bcfed5
2024-11-22T06:06:27,728 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete
2024-11-22T06:06:29,722 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-11-22T06:06:31,126 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController
2024-11-22T06:06:31,127 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-11-22T06:06:31,127 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-11-22T06:06:31,128 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot
2024-11-22T06:06:31,129 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_acl
2024-11-22T06:06:31,130 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController
2024-11-22T06:06:31,130 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver
2024-11-22T06:06:31,130 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController
2024-11-22T06:06:32,897 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties
2024-11-22T06:06:59,723 DEBUG
[FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-22T06:07:01,910 DEBUG [master/407411ea95db:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=7, created chunk count=10, reused chunk count=22, reuseRatio=68.75% 2024-11-22T06:07:01,910 DEBUG [master/407411ea95db:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-11-22T06:07:09,740 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;407411ea95db:40429 226 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 22 Waited count: 13 Waiting on java.lang.ref.ReferenceQueue$Lock@316fc991 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 21 Waited count: 18 
Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 3 Waited count: 15 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 20 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1473c3fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 3346 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 34 Waiting on java.util.concurrent.CountDownLatch$Sync@23eab7e0 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 10420 Waited count: 10937 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) 
app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 10 Waited count: 11 Waiting on java.lang.ref.ReferenceQueue$Lock@45d56f9e Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@4f3320f2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@52ed31ff): State: TIMED_WAITING Blocked count: 0 Waited count: 663 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 67 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1843407950-37): State: RUNNABLE Blocked count: 4 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f795c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1843407950-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f795c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1843407950-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f795c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1843407950-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f795c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1843407950-41-acceptor-0@66a2842b-ServerConnector@797ff5a9{HTTP/1.1, (http/1.1)}{localhost:41781}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1843407950-42): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1843407950-43): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1843407950-44): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-5955aada-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 22 Waited count: 2765 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1f511dde Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 33847): State: TIMED_WAITING Blocked count: 1 Waited count: 35 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 67 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@5cb1c71c): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 113 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@4524a3c7): State: TIMED_WAITING Blocked count: 0 Waited count: 67 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 112 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 32591 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1232 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@67c6f2de Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 33847): State: TIMED_WAITING Blocked count: 95 Waited count: 2039 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 33847): State: TIMED_WAITING Blocked count: 73 Waited count: 2023 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 33847): State: TIMED_WAITING Blocked count: 96 Waited count: 2010 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 33847): State: TIMED_WAITING Blocked count: 89 Waited count: 2026 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 33847): State: TIMED_WAITING Blocked count: 83 Waited count: 2042 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@3c4916c2): State: TIMED_WAITING Blocked count: 0 Waited count: 166 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@62d0c398): State: TIMED_WAITING Blocked count: 0 Waited count: 67 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@2fbefd0a): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@6a446ca): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(300259873)): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 85 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 86 (qtp286242528-86): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f795c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp286242528-87-acceptor-0@60ab5dc9-ServerConnector@f6e660c{HTTP/1.1, (http/1.1)}{localhost:41047}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp286242528-88): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp286242528-89): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (Session-HouseKeeper-cba4131-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@106c429a): State: TIMED_WAITING Blocked count: 0 Waited count: 660 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 94 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 95 (IPC Server idle connection scanner for port 44493): State: TIMED_WAITING Blocked count: 1 Waited count: 34 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 97 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 66 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 100 (Command processor): State: WAITING Blocked count: 1 Waited count: 276 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7995302a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 101 (BP-1246063832-172.17.0.2-1732255315577 heartbeating to localhost/127.0.0.1:33847): State: TIMED_WAITING Blocked count: 1188 Waited count: 1295 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 102 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 84 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@7cdce6fb): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: 
java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 96 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 93 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 103 (IPC Server handler 0 on default port 44493): State: TIMED_WAITING Blocked count: 0 Waited count: 407 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 104 (IPC Server handler 1 on default port 44493): State: TIMED_WAITING Blocked count: 0 Waited count: 372 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 2 on default port 44493): State: TIMED_WAITING Blocked count: 0 Waited count: 353 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 3 on default port 44493): State: TIMED_WAITING Blocked count: 0 
Waited count: 365 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 4 on default port 44493): State: TIMED_WAITING Blocked count: 0 Waited count: 365 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 118 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp4648250-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f795c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp4648250-122-acceptor-0@7d34b2cc-ServerConnector@7112b791{HTTP/1.1, (http/1.1)}{localhost:37893}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp4648250-123): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp4648250-124): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (IPC Client (1113730973) connection to localhost/127.0.0.1:33847 from jenkins): State: TIMED_WAITING Blocked count: 1171 Waited count: 1171 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 125 (Session-HouseKeeper-7dc28379-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (IPC Parameter Sending Thread for localhost/127.0.0.1:33847): State: TIMED_WAITING Blocked count: 0 Waited count: 1785 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@13e020c1): State: TIMED_WAITING Blocked count: 0 Waited count: 660 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 34425): State: TIMED_WAITING Blocked count: 1 Waited count: 34 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 66 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 3 Waited count: 273 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1ad915d5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-1246063832-172.17.0.2-1732255315577 heartbeating to localhost/127.0.0.1:33847): State: TIMED_WAITING Blocked count: 1180 Waited count: 1299 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@290bee6e): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) 
app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 34425): State: TIMED_WAITING Blocked count: 0 Waited count: 330 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 34425): State: TIMED_WAITING Blocked count: 0 Waited count: 333 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 34425): State: TIMED_WAITING Blocked count: 0 Waited count: 330 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 34425): State: TIMED_WAITING Blocked count: 0 Waited count: 333 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 34425): State: TIMED_WAITING Blocked count: 0 Waited count: 332 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp1829640939-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f795c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp1829640939-154-acceptor-0@2bf788a2-ServerConnector@5ed153c3{HTTP/1.1, (http/1.1)}{localhost:42407}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) 
java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp1829640939-155): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp1829640939-156): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-7785c5c0-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@252ed2c8): State: TIMED_WAITING Blocked count: 0 Waited count: 659 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 38763): State: TIMED_WAITING Blocked count: 1 Waited count: 34 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 66 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 1 Waited count: 256 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@aa7582 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-1246063832-172.17.0.2-1732255315577 heartbeating to localhost/127.0.0.1:33847): State: TIMED_WAITING Blocked count: 1179 Waited count: 1292 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@54cf8e83): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 38763): State: TIMED_WAITING 
Blocked count: 0 Waited count: 332 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 38763): State: TIMED_WAITING Blocked count: 0 Waited count: 333 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 38763): State: TIMED_WAITING Blocked count: 0 Waited count: 348 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 38763): State: TIMED_WAITING Blocked count: 0 Waited count: 350 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 38763): State: TIMED_WAITING Blocked count: 0 Waited count: 335 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data1)): State: TIMED_WAITING Blocked count: 21 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data2)): State: TIMED_WAITING Blocked count: 22 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 191 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data1/current/BP-1246063832-172.17.0.2-1732255315577): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 192 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data2/current/BP-1246063832-172.17.0.2-1732255315577): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 203 (java.util.concurrent.ThreadPoolExecutor$Worker@46de5720[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 204 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data3)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 205 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data4)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 210 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data4/current/BP-1246063832-172.17.0.2-1732255315577): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 211 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data3/current/BP-1246063832-172.17.0.2-1732255315577): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 215 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 218 (java.util.concurrent.ThreadPoolExecutor$Worker@406f266a[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 219 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 220 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 225 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data6/current/BP-1246063832-172.17.0.2-1732255315577): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 226 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data5/current/BP-1246063832-172.17.0.2-1732255315577): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 230 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 233 (java.util.concurrent.ThreadPoolExecutor$Worker@3b47d26a[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 234 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 237 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 236 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 238 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:59716): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 235 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 34 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 239 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 165 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 240 (SyncThread:0): State: WAITING 
Blocked count: 9 Waited count: 327 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f9bdfe9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 241 (ProcessThread(sid:0 cport:59716):): State: WAITING Blocked count: 1 Waited count: 420 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@11ca1bc8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 242 (RequestThrottler): State: WAITING Blocked count: 1 Waited count: 443 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@fab93b7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 243 (NIOWorkerThread-1): State: WAITING Blocked count: 1 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 245 (LeaseRenewer:jenkins@localhost:33847): State: TIMED_WAITING Blocked count: 7 Waited count: 339 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 254 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@66a61897 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 301 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 35 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (Time-limited test-SendThread(127.0.0.1:59716)): State: RUNNABLE Blocked count: 9 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 258 (Time-limited test-EventThread): State: WAITING Blocked count: 21 Waited count: 52 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@749652a2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 259 (NIOWorkerThread-2): State: WAITING Blocked count: 3 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (NIOWorkerThread-3): State: WAITING Blocked count: 4 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (NIOWorkerThread-4): State: WAITING Blocked count: 4 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (zk-event-processor-pool-0): State: WAITING Blocked count: 18 Waited count: 66 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1c855a4e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-5): State: WAITING Blocked count: 2 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-6): State: WAITING Blocked count: 7 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-7): State: WAITING Blocked count: 1 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-8): State: WAITING Blocked count: 4 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-9): State: WAITING Blocked count: 3 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-10): State: WAITING Blocked count: 4 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-11): State: WAITING Blocked count: 3 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-12): State: WAITING Blocked count: 3 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-13): State: WAITING Blocked count: 5 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-14): State: WAITING Blocked count: 6 Waited count: 82 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-15): State: WAITING Blocked count: 2 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-16): State: WAITING Blocked count: 3 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40429): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@4a318be9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429): State: WAITING Blocked count: 115 Waited count: 412 Waiting on java.util.concurrent.Semaphore$NonfairSync@77a164b4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429): State: WAITING Blocked count: 107 Waited count: 454 Waiting on java.util.concurrent.Semaphore$NonfairSync@31e43ba0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 279 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40429): State: WAITING Blocked count: 31 Waited count: 7054 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70550f8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40429): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@41adfd32 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40429): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@41adfd32 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=40429): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@3dd5a577 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=40429): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@2d7ca056 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 
(RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=40429): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@d3a791 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=40429): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@2e399ed4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 288 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3e577879 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 289 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 311 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 333 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 124 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 286 (M:0;407411ea95db:40429): State: TIMED_WAITING Blocked count: 12 Waited count: 2667 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1108/0x00007f795cf974b0.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) 
app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 356 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 33 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 358 (master/407411ea95db:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 360 (master/407411ea95db:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 362 (org.apache.hadoop.hdfs.PeerCache@bc0f4d2): State: TIMED_WAITING Blocked count: 0 Waited count: 109 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 380 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 3251 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 397 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 113 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 398 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 66 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 412 (Idle-Rpc-Conn-Sweeper-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 126 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 423 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 33 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 411 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 32437 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 432 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 40 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 433 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 13 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 458 (RegionServerTracker-0): State: WAITING Blocked count: 8 Waited count: 11 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7a5460f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 480 (regionserver/407411ea95db:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 27 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7f7bbb2f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 481 (regionserver/407411ea95db:0.procedureResultReporter): State: WAITING Blocked count: 21 Waited count: 41 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b97323f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 482 (regionserver/407411ea95db:0.procedureResultReporter): State: WAITING Blocked count: 9 Waited count: 17 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7a51d65 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 513 (LeaseRenewer:jenkins.hfs.2@localhost:33847): State: TIMED_WAITING Blocked count: 8 Waited count: 337 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 517 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 527 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 528 (region-location-0): State: WAITING Blocked count: 7 Waited count: 12 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2a9db6a0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 534 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 410 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 32276 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 541 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 543 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 7 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 560 (ForkJoinPool.commonPool-worker-1): State: WAITING Blocked count: 0 Waited count: 465 Waiting on java.util.concurrent.ForkJoinPool@89dc21f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 584 (region-location-1): State: WAITING Blocked count: 6 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2a9db6a0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 585 (region-location-2): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2a9db6a0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 986 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 393 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1050 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1080 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1094 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 60 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@154863ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1139 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1140 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1141 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1241 (ForkJoinPool.commonPool-worker-3): State: WAITING Blocked count: 0 Waited count: 411 Waiting on java.util.concurrent.ForkJoinPool@89dc21f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 1464 (Container metrics unregistration): State: WAITING Blocked count: 10 Waited count: 33 Waiting on java.util.TaskQueue@5227ef53 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1678 (ForkJoinPool.commonPool-worker-4): State: TIMED_WAITING Blocked count: 0 Waited count: 650 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 2897 (region-location-3): State: WAITING Blocked count: 2 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2a9db6a0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2898 (region-location-4): State: WAITING Blocked count: 2 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2a9db6a0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4615 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4616 
(RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4617 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8192 (AsyncFSWAL-1-hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/MasterData-prefix:407411ea95db,40429,1732255320691): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@344c4232 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8196 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-11-22T06:07:29,723 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might be because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-22T06:07:59,723 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might be because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;407411ea95db:40429 223 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 22 Waited count: 13 Waiting on java.lang.ref.ReferenceQueue$Lock@316fc991 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 21 Waited count: 19 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method)
java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 3 Waited count: 18 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 23 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1473c3fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 3946 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 40 Waiting on java.util.concurrent.CountDownLatch$Sync@106999b7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native 
Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 10421 Waited count: 10938 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) 
app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 10 Waited count: 11 Waiting on java.lang.ref.ReferenceQueue$Lock@45d56f9e Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@4f3320f2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@52ed31ff): State: TIMED_WAITING Blocked count: 0 Waited count: 783 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 79 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1843407950-37): State: RUNNABLE Blocked count: 4 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f795c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1843407950-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f795c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1843407950-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f795c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1843407950-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f795c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1843407950-41-acceptor-0@66a2842b-ServerConnector@797ff5a9{HTTP/1.1, (http/1.1)}{localhost:41781}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1843407950-42): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1843407950-43): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1843407950-44): State: 
TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-5955aada-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 22 Waited count: 2765 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1f511dde Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) 
app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 33847): State: TIMED_WAITING Blocked count: 1 Waited count: 41 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 79 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@5cb1c71c): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 133 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 
(org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@4524a3c7): State: TIMED_WAITING Blocked count: 0 Waited count: 79 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 132 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 38515 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1232 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@67c6f2de Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 33847): State: TIMED_WAITING Blocked count: 95 Waited count: 2099 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 33847): State: TIMED_WAITING Blocked count: 73 Waited count: 2083 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 33847): State: TIMED_WAITING Blocked count: 96 Waited count: 2070 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 33847): State: TIMED_WAITING Blocked count: 89 Waited count: 2086 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 33847): State: TIMED_WAITING Blocked count: 83 Waited count: 2102 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@3c4916c2): State: TIMED_WAITING Blocked count: 0 Waited count: 196 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@62d0c398): State: TIMED_WAITING Blocked count: 0 Waited count: 79 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@2fbefd0a): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@6a446ca): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(300259873)): State: TIMED_WAITING Blocked count: 0 Waited count: 15 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 85 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 86 (qtp286242528-86): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f795c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp286242528-87-acceptor-0@60ab5dc9-ServerConnector@f6e660c{HTTP/1.1, (http/1.1)}{localhost:41047}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp286242528-88): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp286242528-89): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (Session-HouseKeeper-cba4131-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@106c429a): State: TIMED_WAITING Blocked count: 0 Waited count: 780 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 94 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 95 (IPC Server idle connection scanner for port 44493): State: TIMED_WAITING Blocked count: 1 Waited count: 40 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 97 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 78 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 100 (Command processor): State: WAITING Blocked count: 1 Waited count: 296 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7995302a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 101 (BP-1246063832-172.17.0.2-1732255315577 heartbeating to localhost/127.0.0.1:33847): State: TIMED_WAITING Blocked count: 1208 Waited count: 1335 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 102 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 84 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@7cdce6fb): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: 
java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 96 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 93 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 103 (IPC Server handler 0 on default port 44493): State: TIMED_WAITING Blocked count: 0 Waited count: 475 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 104 (IPC Server handler 1 on default port 44493): State: TIMED_WAITING Blocked count: 0 Waited count: 432 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 2 on default port 44493): State: TIMED_WAITING Blocked count: 0 Waited count: 416 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 3 on default port 44493): State: TIMED_WAITING Blocked count: 0 
Waited count: 431 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 4 on default port 44493): State: TIMED_WAITING Blocked count: 0 Waited count: 425 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 118 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp4648250-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f795c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp4648250-122-acceptor-0@7d34b2cc-ServerConnector@7112b791{HTTP/1.1, (http/1.1)}{localhost:37893}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp4648250-123): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp4648250-124): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (IPC Client (1113730973) connection to localhost/127.0.0.1:33847 from jenkins): State: TIMED_WAITING Blocked count: 1231 Waited count: 1231 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 125 (Session-HouseKeeper-7dc28379-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (IPC Parameter Sending Thread for localhost/127.0.0.1:33847): State: TIMED_WAITING Blocked count: 0 Waited count: 1845 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@13e020c1): State: TIMED_WAITING Blocked count: 0 Waited count: 780 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 34425): State: TIMED_WAITING Blocked count: 1 Waited count: 40 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 78 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 3 Waited count: 293 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1ad915d5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-1246063832-172.17.0.2-1732255315577 heartbeating to localhost/127.0.0.1:33847): State: TIMED_WAITING Blocked count: 1200 Waited count: 1339 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@290bee6e): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) 
app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 34425): State: TIMED_WAITING Blocked count: 0 Waited count: 390 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 34425): State: TIMED_WAITING Blocked count: 0 Waited count: 393 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 34425): State: TIMED_WAITING Blocked count: 0 Waited count: 390 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 34425): State: TIMED_WAITING Blocked count: 0 Waited count: 393 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 34425): State: TIMED_WAITING Blocked count: 0 Waited count: 392 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp1829640939-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f795c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp1829640939-154-acceptor-0@2bf788a2-ServerConnector@5ed153c3{HTTP/1.1, (http/1.1)}{localhost:42407}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) 
java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp1829640939-155): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp1829640939-156): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-7785c5c0-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@252ed2c8): State: TIMED_WAITING Blocked count: 0 Waited count: 779 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 38763): State: TIMED_WAITING Blocked count: 1 Waited count: 40 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 78 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 1 Waited count: 276 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@aa7582 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-1246063832-172.17.0.2-1732255315577 heartbeating to localhost/127.0.0.1:33847): State: TIMED_WAITING Blocked count: 1199 Waited count: 1332 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@54cf8e83): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 38763): State: TIMED_WAITING 
Blocked count: 0 Waited count: 392 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 38763): State: TIMED_WAITING Blocked count: 0 Waited count: 393 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 38763): State: TIMED_WAITING Blocked count: 0 Waited count: 414 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 38763): State: TIMED_WAITING Blocked count: 0 Waited count: 424 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 38763): State: TIMED_WAITING Blocked count: 0 Waited count: 395 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data1)): State: TIMED_WAITING Blocked count: 21 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data2)): State: TIMED_WAITING Blocked count: 22 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 191 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data1/current/BP-1246063832-172.17.0.2-1732255315577): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 192 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data2/current/BP-1246063832-172.17.0.2-1732255315577): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 203 (java.util.concurrent.ThreadPoolExecutor$Worker@46de5720[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 204 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data3)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 205 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data4)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 210 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data4/current/BP-1246063832-172.17.0.2-1732255315577): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 211 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data3/current/BP-1246063832-172.17.0.2-1732255315577): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 215 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 218 (java.util.concurrent.ThreadPoolExecutor$Worker@406f266a[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 219 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 220 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 225 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data6/current/BP-1246063832-172.17.0.2-1732255315577): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 226 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data5/current/BP-1246063832-172.17.0.2-1732255315577): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 230 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 233 (java.util.concurrent.ThreadPoolExecutor$Worker@3b47d26a[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 234 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 237 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 236 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 238 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:59716): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 235 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 40 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 239 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 195 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 240 (SyncThread:0): State: WAITING 
Blocked count: 9 Waited count: 332 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f9bdfe9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 241 (ProcessThread(sid:0 cport:59716):): State: WAITING Blocked count: 1 Waited count: 425 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@11ca1bc8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 242 (RequestThrottler): State: WAITING Blocked count: 1 Waited count: 448 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@fab93b7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 243 (NIOWorkerThread-1): State: WAITING Blocked count: 1 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 254 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@66a61897 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 341 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 35 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (Time-limited test-SendThread(127.0.0.1:59716)): State: RUNNABLE Blocked count: 9 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 258 (Time-limited test-EventThread): State: WAITING Blocked count: 21 Waited count: 52 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@749652a2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 259 (NIOWorkerThread-2): State: WAITING Blocked count: 3 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (NIOWorkerThread-3): State: WAITING Blocked count: 4 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 
(NIOWorkerThread-4): State: WAITING Blocked count: 4 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (zk-event-processor-pool-0): State: WAITING Blocked count: 18 Waited count: 66 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1c855a4e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-5): State: WAITING Blocked count: 2 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-6): State: WAITING Blocked count: 7 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-7): State: WAITING Blocked count: 1 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-8): State: WAITING Blocked count: 4 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-9): State: WAITING Blocked count: 3 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-10): State: WAITING Blocked count: 4 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-11): State: WAITING Blocked count: 3 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-12): State: WAITING Blocked count: 3 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-13): State: WAITING Blocked count: 5 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-14): State: WAITING Blocked count: 6 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-15): State: WAITING Blocked count: 2 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-16): State: WAITING Blocked count: 3 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40429): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@4a318be9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) 
app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429): State: WAITING Blocked count: 115 Waited count: 412 Waiting on java.util.concurrent.Semaphore$NonfairSync@77a164b4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429): State: WAITING Blocked count: 107 Waited count: 454 Waiting on java.util.concurrent.Semaphore$NonfairSync@31e43ba0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 279 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40429): State: WAITING Blocked count: 31 Waited count: 7054 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70550f8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40429): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@41adfd32 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40429): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@41adfd32 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=40429): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@3dd5a577 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=40429): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@2d7ca056 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=40429): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@d3a791 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=40429): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@2e399ed4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 288 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3e577879 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 289 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 311 
(MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 333 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 124 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 286 (M:0;407411ea95db:40429): State: TIMED_WAITING Blocked count: 12 Waited count: 2667 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1108/0x00007f795cf974b0.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) 
app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 356 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 39 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 358 (master/407411ea95db:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 360 (master/407411ea95db:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 362 (org.apache.hadoop.hdfs.PeerCache@bc0f4d2): State: TIMED_WAITING Blocked count: 0 Waited count: 129 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 380 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 3850 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 397 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 113 Waited count: 4 
Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 398 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 66 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 412 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 143 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@c2f16e1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 423 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 39 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 411 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 38440 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 432 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 40 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 433 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 13 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 458 (RegionServerTracker-0): State: WAITING Blocked count: 8 Waited count: 11 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7a5460f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 480 (regionserver/407411ea95db:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 27 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7f7bbb2f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 481 (regionserver/407411ea95db:0.procedureResultReporter): State: WAITING Blocked count: 21 Waited count: 41 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b97323f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 482 (regionserver/407411ea95db:0.procedureResultReporter): State: WAITING Blocked count: 9 Waited count: 17 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7a51d65 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 517 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 527 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 528 (region-location-0): State: WAITING Blocked count: 7 Waited count: 12 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2a9db6a0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 534 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 410 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 38279 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 541 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 543 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 7 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 560 (ForkJoinPool.commonPool-worker-1): State: WAITING Blocked count: 0 Waited count: 465 Waiting on java.util.concurrent.ForkJoinPool@89dc21f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 584 (region-location-1): State: WAITING Blocked count: 6 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2a9db6a0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 585 (region-location-2): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2a9db6a0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 986 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 399 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1050 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1080 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1094 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 60 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@154863ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1139 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1140 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1141 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1241 (ForkJoinPool.commonPool-worker-3): State: TIMED_WAITING Blocked count: 0 Waited count: 412 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 1464 (Container metrics unregistration): State: WAITING Blocked count: 10 Waited count: 33 Waiting on java.util.TaskQueue@5227ef53 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 2897 (region-location-3): State: WAITING Blocked count: 2 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2a9db6a0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2898 (region-location-4): State: WAITING Blocked count: 2 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2a9db6a0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4615 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4616 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4617 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8192 (AsyncFSWAL-1-hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/MasterData-prefix:407411ea95db,40429,1732255320691): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@344c4232 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8196 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
2024-11-22T06:08:29,724 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; this might be because your Hadoop version is > 3.2.3 or 3.3.4, see HBASE-27595 for details.
2024-11-22T06:08:59,724 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; this might be because your Hadoop version is > 3.2.3 or 3.3.4, see HBASE-27595 for details.
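Note: the recurring "Process Thread Dump" blocks in this log are emitted every 60 seconds while the Time-limited test thread waits for the mini-cluster master M:0;407411ea95db:40429 to shut down; its stack shows org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive driving sun.management.ThreadImpl.getThreadInfo via ReflectionUtils.printThreadInfo. The following is a minimal, self-contained sketch of that pattern using only the standard java.lang.management API; it is not the HBase utility code itself, and its output format only approximates the dumps shown here (thread name, state, blocked/waited counts, waited-on lock, stack).

    import java.lang.management.ManagementFactory;
    import java.lang.management.ThreadInfo;
    import java.lang.management.ThreadMXBean;

    /** Sketch only: approximates the Threads.threadDumpingIsAlive / printThreadInfo pattern. */
    public final class PeriodicThreadDump {

        public static void dumpWhileAlive(Thread watched, long intervalMillis)
                throws InterruptedException {
            ThreadMXBean mx = ManagementFactory.getThreadMXBean();
            while (watched.isAlive()) {
                watched.join(intervalMillis);          // wait up to one interval
                if (!watched.isAlive()) {
                    break;                             // watched thread finished, stop dumping
                }
                ThreadInfo[] infos = mx.getThreadInfo(mx.getAllThreadIds(), Integer.MAX_VALUE);
                System.out.println("Process Thread Dump: " + infos.length + " active threads");
                for (ThreadInfo info : infos) {
                    if (info == null) {
                        continue;                      // thread exited between the two calls
                    }
                    System.out.println("Thread " + info.getThreadId()
                        + " (" + info.getThreadName() + "):");
                    System.out.println("  State: " + info.getThreadState());
                    System.out.println("  Blocked count: " + info.getBlockedCount());
                    System.out.println("  Waited count: " + info.getWaitedCount());
                    if (info.getLockName() != null) {
                        System.out.println("  Waiting on " + info.getLockName());
                    }
                    System.out.println("  Stack:");
                    for (StackTraceElement frame : info.getStackTrace()) {
                        System.out.println("    " + frame);
                    }
                }
            }
        }
    }

The FsDatasetAsyncDiskServiceFixer DEBUG lines above come from a reflective probe for a private threadGroup field that newer Hadoop releases no longer declare (HBASE-27595). A hypothetical sketch of such a probe follows; only the field name "threadGroup" and the HBASE-27595 reference come from the log, while the class name, parameter, and surrounding logic are assumed for illustration.

    import java.lang.reflect.Field;

    /** Sketch only: the real HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer differs. */
    public final class ThreadGroupFieldProbe {

        public static void probe(Object asyncDiskService) {
            try {
                // Field name taken from the log; the enclosing class is an assumption.
                Field threadGroup = asyncDiskService.getClass().getDeclaredField("threadGroup");
                threadGroup.setAccessible(true);
                // ... a real fixer would read or replace the field here ...
            } catch (NoSuchFieldException e) {
                System.out.println("NoSuchFieldException: threadGroup; this might be because your "
                    + "Hadoop version is > 3.2.3 or 3.3.4, see HBASE-27595 for details.");
            }
        }
    }
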
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;407411ea95db:40429 222 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 22 Waited count: 13 Waiting on java.lang.ref.ReferenceQueue$Lock@316fc991 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 21 Waited count: 20 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 3 Waited count: 21 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 26 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1473c3fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 4545 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 46 Waiting on java.util.concurrent.CountDownLatch$Sync@7dd9b19e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 10421 Waited count: 10939 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 10 Waited count: 11 Waiting on java.lang.ref.ReferenceQueue$Lock@45d56f9e Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@4f3320f2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@52ed31ff): State: TIMED_WAITING Blocked count: 0 Waited count: 903 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 91 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1843407950-37): State: RUNNABLE Blocked count: 4 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f795c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1843407950-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f795c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1843407950-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f795c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1843407950-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f795c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1843407950-41-acceptor-0@66a2842b-ServerConnector@797ff5a9{HTTP/1.1, (http/1.1)}{localhost:41781}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1843407950-42): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1843407950-43): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1843407950-44): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-5955aada-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 22 Waited count: 2765 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1f511dde Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 33847): State: TIMED_WAITING Blocked count: 1 Waited 
count: 47 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 91 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@5cb1c71c): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 153 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@4524a3c7): State: TIMED_WAITING Blocked count: 0 Waited count: 91 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 152 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 44439 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1232 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@67c6f2de Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 33847): State: TIMED_WAITING Blocked count: 95 Waited count: 2159 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 33847): State: TIMED_WAITING Blocked count: 73 Waited count: 2143 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 33847): State: TIMED_WAITING Blocked count: 96 Waited count: 2130 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 33847): State: TIMED_WAITING Blocked count: 89 Waited count: 2146 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 33847): State: TIMED_WAITING Blocked count: 83 Waited count: 2162 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@3c4916c2): State: TIMED_WAITING Blocked count: 0 Waited count: 226 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@62d0c398): State: TIMED_WAITING Blocked count: 0 Waited count: 91 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@2fbefd0a): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@6a446ca): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(300259873)): State: TIMED_WAITING Blocked count: 0 Waited count: 17 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 85 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 86 (qtp286242528-86): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f795c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp286242528-87-acceptor-0@60ab5dc9-ServerConnector@f6e660c{HTTP/1.1, (http/1.1)}{localhost:41047}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp286242528-88): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp286242528-89): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (Session-HouseKeeper-cba4131-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 
91 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@106c429a): State: TIMED_WAITING Blocked count: 0 Waited count: 900 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 94 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 95 (IPC Server idle connection scanner for port 44493): State: TIMED_WAITING Blocked count: 1 Waited count: 46 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 97 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 90 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 100 (Command processor): State: WAITING Blocked count: 1 Waited count: 316 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7995302a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 101 (BP-1246063832-172.17.0.2-1732255315577 heartbeating to localhost/127.0.0.1:33847): State: TIMED_WAITING Blocked count: 1228 Waited count: 1375 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 102 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 84 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@7cdce6fb): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 96 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) 
app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 93 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 103 (IPC Server handler 0 on default port 44493): State: TIMED_WAITING Blocked count: 0 Waited count: 535 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 104 (IPC Server handler 1 on default port 44493): State: TIMED_WAITING Blocked count: 0 Waited count: 492 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 2 on default port 44493): State: TIMED_WAITING Blocked count: 0 Waited count: 498 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 3 on default port 44493): State: TIMED_WAITING Blocked count: 0 Waited count: 503 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 4 on default port 44493): State: TIMED_WAITING Blocked count: 0 Waited count: 485 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 118 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp4648250-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f795c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp4648250-122-acceptor-0@7d34b2cc-ServerConnector@7112b791{HTTP/1.1, (http/1.1)}{localhost:37893}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp4648250-123): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp4648250-124): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (IPC Client (1113730973) connection to localhost/127.0.0.1:33847 from jenkins): State: TIMED_WAITING Blocked count: 1291 Waited count: 1291 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 125 (Session-HouseKeeper-7dc28379-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (IPC Parameter Sending Thread for localhost/127.0.0.1:33847): State: TIMED_WAITING Blocked count: 0 Waited count: 1905 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@13e020c1): State: TIMED_WAITING Blocked count: 0 Waited count: 900 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 34425): State: TIMED_WAITING Blocked count: 1 Waited count: 46 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 90 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 3 Waited count: 313 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1ad915d5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-1246063832-172.17.0.2-1732255315577 heartbeating to localhost/127.0.0.1:33847): State: TIMED_WAITING Blocked count: 1220 Waited count: 1379 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@290bee6e): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 34425): State: TIMED_WAITING Blocked count: 0 Waited count: 450 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 34425): State: TIMED_WAITING Blocked count: 0 Waited count: 453 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 34425): State: TIMED_WAITING Blocked count: 0 Waited count: 450 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 34425): State: TIMED_WAITING Blocked count: 0 Waited count: 453 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 34425): State: TIMED_WAITING Blocked count: 0 Waited count: 452 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp1829640939-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f795c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp1829640939-154-acceptor-0@2bf788a2-ServerConnector@5ed153c3{HTTP/1.1, (http/1.1)}{localhost:42407}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp1829640939-155): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp1829640939-156): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-7785c5c0-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@252ed2c8): State: TIMED_WAITING Blocked count: 0 Waited count: 899 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 38763): State: TIMED_WAITING Blocked count: 1 Waited count: 46 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 90 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 1 Waited count: 296 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@aa7582 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-1246063832-172.17.0.2-1732255315577 heartbeating to localhost/127.0.0.1:33847): State: TIMED_WAITING Blocked count: 1219 Waited count: 1372 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@54cf8e83): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 38763): State: TIMED_WAITING Blocked count: 0 Waited count: 452 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 38763): State: TIMED_WAITING Blocked count: 0 Waited count: 453 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 38763): State: TIMED_WAITING Blocked count: 0 Waited count: 477 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 38763): State: TIMED_WAITING Blocked count: 0 Waited count: 490 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 38763): State: TIMED_WAITING Blocked count: 0 Waited count: 460 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data1)): State: TIMED_WAITING Blocked count: 21 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data2)): State: TIMED_WAITING Blocked count: 22 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 191 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data1/current/BP-1246063832-172.17.0.2-1732255315577): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 192 
(refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data2/current/BP-1246063832-172.17.0.2-1732255315577): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 203 (java.util.concurrent.ThreadPoolExecutor$Worker@46de5720[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 204 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data3)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 205 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data4)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 210 
(refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data4/current/BP-1246063832-172.17.0.2-1732255315577): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 211 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data3/current/BP-1246063832-172.17.0.2-1732255315577): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 215 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 218 (java.util.concurrent.ThreadPoolExecutor$Worker@406f266a[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 219 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 220 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 225 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data6/current/BP-1246063832-172.17.0.2-1732255315577): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 226 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data5/current/BP-1246063832-172.17.0.2-1732255315577): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 230 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 233 (java.util.concurrent.ThreadPoolExecutor$Worker@3b47d26a[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 234 (FsDatasetAsyncDiskServiceFixer): State: 
TIMED_WAITING Blocked count: 0 Waited count: 15 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 237 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 236 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 238 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:59716): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 235 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 46 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 239 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 225 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 240 (SyncThread:0): State: WAITING Blocked count: 9 Waited count: 336 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f9bdfe9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 241 (ProcessThread(sid:0 
cport:59716):): State: WAITING Blocked count: 1 Waited count: 429 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@11ca1bc8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 242 (RequestThrottler): State: WAITING Blocked count: 1 Waited count: 452 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@fab93b7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 243 (NIOWorkerThread-1): State: WAITING Blocked count: 1 Waited count: 87 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 254 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@66a61897 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) 
app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 381 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 35 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (Time-limited test-SendThread(127.0.0.1:59716)): State: RUNNABLE Blocked count: 9 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 258 (Time-limited test-EventThread): State: WAITING Blocked count: 21 Waited count: 52 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@749652a2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 259 (NIOWorkerThread-2): State: WAITING Blocked count: 3 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (NIOWorkerThread-3): State: WAITING Blocked count: 4 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (NIOWorkerThread-4): State: WAITING Blocked count: 4 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (zk-event-processor-pool-0): State: WAITING Blocked count: 18 Waited count: 66 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1c855a4e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-5): State: WAITING Blocked count: 2 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-6): State: WAITING Blocked count: 7 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-7): State: WAITING Blocked count: 1 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-8): State: WAITING Blocked count: 4 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-9): State: WAITING Blocked count: 3 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-10): State: WAITING Blocked count: 4 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-11): State: WAITING Blocked count: 3 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-12): State: WAITING Blocked count: 3 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-13): State: WAITING Blocked count: 5 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-14): State: WAITING Blocked count: 6 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-15): State: WAITING Blocked count: 2 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-16): State: WAITING Blocked count: 3 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40429): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@4a318be9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429): State: WAITING Blocked count: 115 Waited count: 412 Waiting on java.util.concurrent.Semaphore$NonfairSync@77a164b4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 278 
(RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429): State: WAITING Blocked count: 107 Waited count: 454 Waiting on java.util.concurrent.Semaphore$NonfairSync@31e43ba0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 279 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40429): State: WAITING Blocked count: 31 Waited count: 7054 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70550f8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40429): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@41adfd32 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40429): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@41adfd32 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=40429): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@3dd5a577 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=40429): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@2d7ca056 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=40429): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@d3a791 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=40429): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@2e399ed4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) 
java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 288 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3e577879 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 289 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 311 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 333 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 124 
Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 286 (M:0;407411ea95db:40429): State: TIMED_WAITING Blocked count: 12 Waited count: 2667 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1108/0x00007f795cf974b0.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 356 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 45 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 358 (master/407411ea95db:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 360 (master/407411ea95db:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 362 (org.apache.hadoop.hdfs.PeerCache@bc0f4d2): State: TIMED_WAITING Blocked count: 0 Waited count: 149 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 380 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 4449 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 397 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 113 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 398 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 66 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 412 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 143 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@c2f16e1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 423 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 45 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 411 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 44443 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) 
app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 432 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 40 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 433 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 13 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 458 (RegionServerTracker-0): State: WAITING Blocked count: 8 Waited count: 11 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7a5460f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 480 (regionserver/407411ea95db:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 27 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7f7bbb2f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 481 (regionserver/407411ea95db:0.procedureResultReporter): State: WAITING Blocked count: 21 Waited count: 41 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b97323f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 482 (regionserver/407411ea95db:0.procedureResultReporter): State: WAITING Blocked count: 9 Waited count: 17 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7a51d65 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 517 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 527 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 528 (region-location-0): State: WAITING Blocked count: 7 Waited count: 12 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2a9db6a0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 534 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 410 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 44281 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 541 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 543 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 7 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 560 (ForkJoinPool.commonPool-worker-1): State: TIMED_WAITING Blocked count: 0 Waited count: 466 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) 
java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 584 (region-location-1): State: WAITING Blocked count: 6 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2a9db6a0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 585 (region-location-2): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2a9db6a0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 986 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 405 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1050 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1080 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1094 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 60 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@154863ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1139 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1140 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1141 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1464 (Container metrics 
unregistration): State: WAITING Blocked count: 10 Waited count: 33 Waiting on java.util.TaskQueue@5227ef53 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 2897 (region-location-3): State: WAITING Blocked count: 2 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2a9db6a0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2898 (region-location-4): State: WAITING Blocked count: 2 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2a9db6a0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4615 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4616 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4617 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8192 (AsyncFSWAL-1-hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/MasterData-prefix:407411ea95db,40429,1732255320691): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@344c4232 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8196 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 14 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-11-22T06:09:29,725 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-22T06:09:59,725 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;407411ea95db:40429 221 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) 
java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 22 Waited count: 13 Waiting on java.lang.ref.ReferenceQueue$Lock@316fc991 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 21 Waited count: 21 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 3 Waited count: 24 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1473c3fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 5145 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 52 Waiting on java.util.concurrent.CountDownLatch$Sync@2bd221c5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 10421 Waited count: 10940 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) 
app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 10 Waited count: 11 Waiting on java.lang.ref.ReferenceQueue$Lock@45d56f9e Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@4f3320f2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@52ed31ff): State: TIMED_WAITING Blocked count: 0 Waited count: 1023 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 103 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1843407950-37): State: RUNNABLE Blocked count: 4 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f795c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1843407950-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f795c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1843407950-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) 
app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f795c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1843407950-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f795c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1843407950-41-acceptor-0@66a2842b-ServerConnector@797ff5a9{HTTP/1.1, (http/1.1)}{localhost:41781}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1843407950-42): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1843407950-43): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1843407950-44): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-5955aada-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 22 Waited count: 2765 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1f511dde Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 33847): State: TIMED_WAITING Blocked count: 1 Waited count: 53 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 103 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@5cb1c71c): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 173 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@4524a3c7): State: TIMED_WAITING Blocked count: 0 Waited count: 103 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 172 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 50362 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1232 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@67c6f2de Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 
Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 33847): State: TIMED_WAITING Blocked count: 95 Waited count: 2219 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 33847): State: TIMED_WAITING Blocked count: 73 Waited count: 2203 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 33847): State: TIMED_WAITING Blocked count: 96 Waited count: 2190 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 33847): State: TIMED_WAITING Blocked count: 89 Waited count: 2206 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 33847): State: TIMED_WAITING Blocked 
count: 83 Waited count: 2222 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@3c4916c2): State: TIMED_WAITING Blocked count: 0 Waited count: 256 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@62d0c398): State: TIMED_WAITING Blocked count: 0 Waited count: 103 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@2fbefd0a): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@6a446ca): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(300259873)): State: TIMED_WAITING Blocked count: 0 Waited count: 19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 85 
(pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 86 (qtp286242528-86): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f795c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp286242528-87-acceptor-0@60ab5dc9-ServerConnector@f6e660c{HTTP/1.1, (http/1.1)}{localhost:41047}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp286242528-88): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp286242528-89): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (Session-HouseKeeper-cba4131-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@106c429a): State: TIMED_WAITING Blocked count: 0 Waited count: 1020 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 94 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 95 (IPC Server idle connection scanner for port 44493): State: TIMED_WAITING Blocked count: 1 Waited count: 52 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 97 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 103 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 100 (Command processor): State: WAITING Blocked count: 1 Waited count: 336 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7995302a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 101 (BP-1246063832-172.17.0.2-1732255315577 heartbeating to localhost/127.0.0.1:33847): State: TIMED_WAITING Blocked count: 1248 Waited count: 1415 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 102 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 84 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@7cdce6fb): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 96 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 93 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 103 (IPC Server handler 0 on default port 44493): State: TIMED_WAITING Blocked count: 0 Waited count: 628 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 104 (IPC Server handler 1 on default port 44493): State: TIMED_WAITING Blocked count: 0 Waited count: 570 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 2 on default port 44493): State: TIMED_WAITING Blocked count: 0 Waited count: 558 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 3 on default port 44493): State: TIMED_WAITING Blocked count: 0 Waited count: 564 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 4 on default port 44493): State: TIMED_WAITING Blocked count: 0 Waited count: 545 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 118 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp4648250-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f795c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp4648250-122-acceptor-0@7d34b2cc-ServerConnector@7112b791{HTTP/1.1, (http/1.1)}{localhost:37893}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp4648250-123): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp4648250-124): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (IPC Client (1113730973) connection to localhost/127.0.0.1:33847 from jenkins): State: TIMED_WAITING Blocked count: 1351 
Waited count: 1351 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 125 (Session-HouseKeeper-7dc28379-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (IPC Parameter Sending Thread for localhost/127.0.0.1:33847): State: TIMED_WAITING Blocked count: 0 Waited count: 1965 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@13e020c1): State: TIMED_WAITING Blocked count: 0 Waited count: 1020 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 34425): State: TIMED_WAITING Blocked count: 1 Waited count: 52 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 102 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 3 Waited count: 333 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1ad915d5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-1246063832-172.17.0.2-1732255315577 heartbeating to localhost/127.0.0.1:33847): State: TIMED_WAITING Blocked count: 1240 Waited count: 1419 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@290bee6e): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 34425): State: TIMED_WAITING Blocked count: 0 Waited count: 510 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 34425): State: TIMED_WAITING Blocked count: 0 Waited count: 513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 34425): State: TIMED_WAITING Blocked count: 0 Waited count: 510 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 34425): State: TIMED_WAITING Blocked count: 0 Waited count: 513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 34425): State: TIMED_WAITING Blocked count: 0 Waited count: 512 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp1829640939-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) 
app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f795c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp1829640939-154-acceptor-0@2bf788a2-ServerConnector@5ed153c3{HTTP/1.1, (http/1.1)}{localhost:42407}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp1829640939-155): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp1829640939-156): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-7785c5c0-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@252ed2c8): State: TIMED_WAITING Blocked count: 0 Waited count: 1019 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 38763): State: TIMED_WAITING Blocked count: 1 Waited count: 52 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 102 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 1 Waited count: 316 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@aa7582 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-1246063832-172.17.0.2-1732255315577 heartbeating to localhost/127.0.0.1:33847): State: TIMED_WAITING Blocked count: 1239 Waited count: 1412 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@54cf8e83): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) 
app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 38763): State: TIMED_WAITING Blocked count: 0 Waited count: 512 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 38763): State: TIMED_WAITING Blocked count: 0 Waited count: 513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 38763): State: TIMED_WAITING Blocked count: 0 Waited count: 539 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 38763): State: TIMED_WAITING Blocked count: 0 Waited count: 555 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 38763): State: TIMED_WAITING Blocked count: 0 Waited count: 522 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data1)): State: TIMED_WAITING Blocked count: 21 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data2)): State: TIMED_WAITING Blocked count: 22 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 191 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data1/current/BP-1246063832-172.17.0.2-1732255315577): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 192 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data2/current/BP-1246063832-172.17.0.2-1732255315577): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 203 (java.util.concurrent.ThreadPoolExecutor$Worker@46de5720[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 204 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data3)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 205 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data4)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 210 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data4/current/BP-1246063832-172.17.0.2-1732255315577): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 211 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data3/current/BP-1246063832-172.17.0.2-1732255315577): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 215 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 218 (java.util.concurrent.ThreadPoolExecutor$Worker@406f266a[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 219 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 220 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 225 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data6/current/BP-1246063832-172.17.0.2-1732255315577): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 226 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data5/current/BP-1246063832-172.17.0.2-1732255315577): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 230 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 233 (java.util.concurrent.ThreadPoolExecutor$Worker@3b47d26a[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 234 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 17 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 237 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 236 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) 
app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 238 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:59716): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 235 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 52 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 239 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 255 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 240 (SyncThread:0): State: WAITING Blocked count: 9 Waited count: 340 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f9bdfe9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 241 (ProcessThread(sid:0 cport:59716):): State: WAITING Blocked count: 1 Waited count: 433 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@11ca1bc8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 242 (RequestThrottler): State: WAITING Blocked count: 1 Waited count: 456 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@fab93b7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 243 (NIOWorkerThread-1): State: WAITING Blocked count: 1 Waited count: 87 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 254 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@66a61897 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 421 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 35 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (Time-limited test-SendThread(127.0.0.1:59716)): State: RUNNABLE Blocked count: 9 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 258 (Time-limited test-EventThread): State: WAITING Blocked count: 21 Waited count: 52 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@749652a2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 259 (NIOWorkerThread-2): State: WAITING Blocked count: 3 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 
(NIOWorkerThread-3): State: WAITING Blocked count: 4 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (NIOWorkerThread-4): State: WAITING Blocked count: 4 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (zk-event-processor-pool-0): State: WAITING Blocked count: 18 Waited count: 66 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1c855a4e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-5): State: WAITING Blocked count: 2 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-6): State: WAITING Blocked count: 7 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-7): State: WAITING Blocked count: 1 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-8): State: WAITING Blocked count: 4 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-9): State: WAITING Blocked count: 3 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-10): State: WAITING Blocked count: 4 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-11): State: WAITING Blocked count: 3 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-12): State: WAITING Blocked count: 3 Waited count: 87 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-13): State: WAITING Blocked count: 5 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-14): State: WAITING Blocked count: 6 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-15): State: WAITING Blocked count: 2 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-16): State: WAITING Blocked count: 3 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40429): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@4a318be9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429): State: WAITING Blocked count: 115 Waited count: 412 Waiting on java.util.concurrent.Semaphore$NonfairSync@77a164b4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429): State: WAITING Blocked count: 107 Waited count: 454 Waiting on java.util.concurrent.Semaphore$NonfairSync@31e43ba0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 279 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40429): State: WAITING Blocked count: 31 Waited count: 7054 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70550f8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40429): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@41adfd32 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40429): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@41adfd32 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=40429): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@3dd5a577 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) 
app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=40429): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@2d7ca056 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=40429): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@d3a791 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=40429): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@2e399ed4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 288 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3e577879 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) 
app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 289 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 311 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 333 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 124 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 286 (M:0;407411ea95db:40429): State: TIMED_WAITING Blocked count: 12 Waited count: 2667 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1108/0x00007f795cf974b0.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 356 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 51 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 358 (master/407411ea95db:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 360 (master/407411ea95db:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 362 (org.apache.hadoop.hdfs.PeerCache@bc0f4d2): State: TIMED_WAITING Blocked count: 0 Waited count: 169 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 380 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 5049 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 397 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 113 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 398 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 66 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 412 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 143 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@c2f16e1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 423 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 51 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 411 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 50445 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 432 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 40 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 433 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 13 Waited count: 0 
Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 458 (RegionServerTracker-0): State: WAITING Blocked count: 8 Waited count: 11 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7a5460f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 480 (regionserver/407411ea95db:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 27 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7f7bbb2f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 481 (regionserver/407411ea95db:0.procedureResultReporter): State: WAITING Blocked count: 21 Waited count: 41 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b97323f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 482 (regionserver/407411ea95db:0.procedureResultReporter): State: WAITING Blocked count: 9 Waited count: 17 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7a51d65 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 517 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 527 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 528 (region-location-0): State: WAITING Blocked count: 7 Waited count: 12 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2a9db6a0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 534 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 410 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 50283 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 541 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 543 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 7 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 584 (region-location-1): State: WAITING Blocked count: 6 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2a9db6a0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 585 (region-location-2): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2a9db6a0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 986 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 411 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1050 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1080 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1094 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 60 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@154863ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1139 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1140 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1141 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1464 (Container metrics unregistration): State: WAITING Blocked count: 10 Waited count: 33 Waiting on java.util.TaskQueue@5227ef53 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 2897 (region-location-3): State: WAITING Blocked count: 2 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2a9db6a0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2898 (region-location-4): State: WAITING Blocked count: 2 Waited count: 4 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2a9db6a0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4615 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4616 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4617 
(RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8192 (AsyncFSWAL-1-hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/MasterData-prefix:407411ea95db,40429,1732255320691): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@344c4232 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8196 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 20 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-11-22T06:10:29,725 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-22T06:10:59,726 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-22T06:11:27,681 DEBUG [M:0;407411ea95db:40429 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732255587666Disabling compacts and flushes for region at 1732255587666Disabling writes for close at 1732255587679 (+13 ms)Obtaining lock to block concurrent updates at 1732255587679Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732255587679Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=842933, getHeapSize=1011304, getOffHeapSize=0, getCellsCount=2219 at 1732255587679Failed flush master:store,,1.1595e783b53d99cd5eef43b6debb2682., putting online again at 1732255887681 (+300002 ms) 2024-11-22T06:11:27,681 WARN [M:0;407411ea95db:40429 {}] region.MasterRegion(134): Failed to close region org.apache.hadoop.hbase.regionserver.wal.WALSyncTimeoutIOException: org.apache.hadoop.hbase.exceptions.TimeoutIOException: Failed to get sync result after 300000 ms for txid=3820, WAL system stuck? at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1033) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.closeRegion(MasterRegion.java:132) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.close(MasterRegion.java:205) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.stopServiceThreads(HMaster.java:1819) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:631) ~[classes/:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.exceptions.TimeoutIOException: Failed to get sync result after 300000 ms for txid=3820, WAL system stuck? at org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:171) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) ~[classes/:?] ... 
19 more 2024-11-22T06:11:27,686 WARN [Close-WAL-Writer-0 {}] wal.AsyncProtobufLogWriter(165): normal close failed, try recover java.lang.NullPointerException: Cannot invoke "org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf.ensureWritable(int)" because "this.buf" is null at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.writeInt(FanOutOneBlockAsyncDFSOutput.java:391) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.lambda$writeWALTrailerAndMagic$3(AsyncProtobufLogWriter.java:247) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.writeWALMetadata(AsyncProtobufLogWriter.java:203) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.writeWALTrailerAndMagic(AsyncProtobufLogWriter.java:240) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractProtobufLogWriter.writeWALTrailer(AbstractProtobufLogWriter.java:252) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:162) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T06:11:27,690 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-11-22T06:11:27,691 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-11-22T06:11:27,691 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file /user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/MasterData/WALs/407411ea95db,40429,1732255320691/407411ea95db%2C40429%2C1732255320691.1732255322313 2024-11-22T06:11:27,695 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=0 on file=/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/MasterData/WALs/407411ea95db,40429,1732255320691/407411ea95db%2C40429%2C1732255320691.1732255322313 after 1ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.recoverAndClose(FanOutOneBlockAsyncDFSOutput.java:605) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:166) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T06:11:27,696 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.InterruptedIOException: Operation cancelled at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.checkIfCancelled(RecoverLeaseFSUtils.java:269) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:159) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.recoverAndClose(FanOutOneBlockAsyncDFSOutput.java:605) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:166) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T06:11:27,696 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/MasterData/WALs/407411ea95db,40429,1732255320691/407411ea95db%2C40429%2C1732255320691.1732255322313 2024-11-22T06:11:27,696 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=0 on file=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/MasterData/WALs/407411ea95db,40429,1732255320691/407411ea95db%2C40429%2C1732255320691.1732255322313 after 0ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;407411ea95db:40429 224 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING 
Blocked count: 22 Waited count: 13 Waiting on java.lang.ref.ReferenceQueue$Lock@316fc991 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 21 Waited count: 22 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 3 Waited count: 27 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 32 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1473c3fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 5744 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 58 Waiting on java.util.concurrent.CountDownLatch$Sync@2b168dc7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 10421 Waited count: 10941 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) 
java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 10 Waited count: 11 Waiting on java.lang.ref.ReferenceQueue$Lock@45d56f9e Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@4f3320f2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@52ed31ff): State: TIMED_WAITING Blocked count: 0 Waited count: 1143 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 115 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1843407950-37): State: RUNNABLE Blocked count: 4 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f795c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1843407950-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f795c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1843407950-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f795c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1843407950-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f795c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1843407950-41-acceptor-0@66a2842b-ServerConnector@797ff5a9{HTTP/1.1, (http/1.1)}{localhost:41781}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1843407950-42): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 43 (qtp1843407950-43): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1843407950-44): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-5955aada-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 22 Waited count: 2765 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1f511dde Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 33847): State: TIMED_WAITING Blocked count: 1 Waited count: 59 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 115 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@5cb1c71c): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 193 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@4524a3c7): State: TIMED_WAITING Blocked count: 0 Waited count: 115 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 192 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 56285 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1232 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@67c6f2de Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC 
Server listener on 0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 33847): State: TIMED_WAITING Blocked count: 95 Waited count: 2279 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 33847): State: TIMED_WAITING Blocked count: 73 Waited count: 2263 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 33847): State: TIMED_WAITING Blocked count: 96 Waited count: 2250 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 33847): State: TIMED_WAITING Blocked count: 89 Waited count: 2266 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 33847): State: TIMED_WAITING Blocked count: 83 Waited count: 2282 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@3c4916c2): State: TIMED_WAITING Blocked count: 0 Waited count: 286 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@62d0c398): State: TIMED_WAITING Blocked count: 0 Waited count: 115 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@2fbefd0a): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@6a446ca): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(300259873)): State: TIMED_WAITING Blocked count: 0 Waited count: 21 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 85 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 86 (qtp286242528-86): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f795c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp286242528-87-acceptor-0@60ab5dc9-ServerConnector@f6e660c{HTTP/1.1, (http/1.1)}{localhost:41047}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp286242528-88): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 
(qtp286242528-89): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (Session-HouseKeeper-cba4131-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@106c429a): State: TIMED_WAITING Blocked count: 0 Waited count: 1140 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 94 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 95 (IPC Server idle connection scanner 
for port 44493): State: TIMED_WAITING Blocked count: 1 Waited count: 58 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 97 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 115 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 100 (Command processor): State: WAITING Blocked count: 1 Waited count: 356 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7995302a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 101 (BP-1246063832-172.17.0.2-1732255315577 heartbeating to localhost/127.0.0.1:33847): State: TIMED_WAITING Blocked count: 1268 Waited count: 1455 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 102 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 84 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@7cdce6fb): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 96 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 93 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 103 (IPC Server handler 0 on default port 44493): State: TIMED_WAITING Blocked count: 0 Waited count: 702 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 104 (IPC Server handler 1 on default port 44493): State: TIMED_WAITING Blocked count: 0 Waited count: 639 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 2 on default port 44493): State: TIMED_WAITING Blocked count: 0 Waited count: 630 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 3 on default port 44493): State: TIMED_WAITING Blocked count: 0 Waited count: 629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 4 on default port 44493): State: TIMED_WAITING Blocked count: 0 Waited count: 605 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 118 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp4648250-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f795c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp4648250-122-acceptor-0@7d34b2cc-ServerConnector@7112b791{HTTP/1.1, (http/1.1)}{localhost:37893}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp4648250-123): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp4648250-124): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (IPC Client (1113730973) connection to localhost/127.0.0.1:33847 from jenkins): State: TIMED_WAITING Blocked count: 1411 Waited count: 1411 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 125 (Session-HouseKeeper-7dc28379-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (IPC Parameter Sending Thread for localhost/127.0.0.1:33847): State: TIMED_WAITING Blocked count: 0 Waited count: 2025 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@13e020c1): State: TIMED_WAITING Blocked count: 0 Waited count: 1140 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 34425): State: TIMED_WAITING Blocked count: 1 Waited count: 58 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked 
count: 0 Waited count: 114 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 3 Waited count: 353 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1ad915d5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-1246063832-172.17.0.2-1732255315577 heartbeating to localhost/127.0.0.1:33847): State: TIMED_WAITING Blocked count: 1260 Waited count: 1459 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@290bee6e): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 34425): State: TIMED_WAITING Blocked count: 0 Waited count: 570 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 34425): State: TIMED_WAITING Blocked count: 0 Waited count: 573 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 34425): State: TIMED_WAITING Blocked count: 0 Waited count: 570 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 34425): State: TIMED_WAITING Blocked count: 0 Waited count: 573 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 34425): State: TIMED_WAITING Blocked count: 0 Waited count: 572 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp1829640939-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f795c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp1829640939-154-acceptor-0@2bf788a2-ServerConnector@5ed153c3{HTTP/1.1, (http/1.1)}{localhost:42407}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp1829640939-155): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp1829640939-156): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-7785c5c0-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@252ed2c8): State: TIMED_WAITING Blocked count: 0 Waited count: 1139 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 38763): State: TIMED_WAITING Blocked count: 1 Waited count: 58 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 114 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 1 Waited count: 336 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@aa7582 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-1246063832-172.17.0.2-1732255315577 heartbeating to localhost/127.0.0.1:33847): State: TIMED_WAITING Blocked count: 1259 Waited count: 1452 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@54cf8e83): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) 
app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 38763): State: TIMED_WAITING Blocked count: 0 Waited count: 572 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 38763): State: TIMED_WAITING Blocked count: 0 Waited count: 574 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 38763): State: TIMED_WAITING Blocked count: 0 Waited count: 629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 38763): State: TIMED_WAITING Blocked count: 0 Waited count: 627 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 38763): State: TIMED_WAITING Blocked count: 0 Waited count: 582 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data1)): State: TIMED_WAITING Blocked count: 21 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data2)): State: TIMED_WAITING Blocked count: 22 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 191 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data1/current/BP-1246063832-172.17.0.2-1732255315577): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 192 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data2/current/BP-1246063832-172.17.0.2-1732255315577): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 203 (java.util.concurrent.ThreadPoolExecutor$Worker@46de5720[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 204 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data3)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 205 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data4)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 210 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data4/current/BP-1246063832-172.17.0.2-1732255315577): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 211 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data3/current/BP-1246063832-172.17.0.2-1732255315577): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 215 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 218 (java.util.concurrent.ThreadPoolExecutor$Worker@406f266a[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 219 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 220 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 225 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data6/current/BP-1246063832-172.17.0.2-1732255315577): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 226 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data5/current/BP-1246063832-172.17.0.2-1732255315577): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 230 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 233 (java.util.concurrent.ThreadPoolExecutor$Worker@3b47d26a[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 234 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 19 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 237 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 236 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 238 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:59716): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 235 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 58 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 239 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 285 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 240 (SyncThread:0): State: WAITING Blocked count: 9 Waited count: 345 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f9bdfe9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 241 (ProcessThread(sid:0 cport:59716):): State: WAITING Blocked count: 1 Waited count: 438 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@11ca1bc8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 242 (RequestThrottler): State: WAITING Blocked count: 1 Waited count: 461 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@fab93b7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 243 (NIOWorkerThread-1): State: WAITING Blocked count: 1 Waited count: 88 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 254 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@66a61897 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 461 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 35 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (Time-limited test-SendThread(127.0.0.1:59716)): State: RUNNABLE Blocked count: 9 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 258 (Time-limited test-EventThread): State: WAITING Blocked count: 21 Waited count: 52 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@749652a2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 259 (NIOWorkerThread-2): State: WAITING Blocked count: 3 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (NIOWorkerThread-3): State: WAITING Blocked count: 4 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (NIOWorkerThread-4): State: WAITING Blocked count: 4 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (zk-event-processor-pool-0): State: WAITING Blocked count: 18 Waited count: 66 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1c855a4e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-5): State: WAITING Blocked count: 2 Waited count: 87 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-6): State: WAITING Blocked count: 7 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-7): State: WAITING Blocked count: 1 Waited count: 87 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-8): State: WAITING Blocked count: 4 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-9): State: WAITING Blocked count: 3 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-10): State: WAITING Blocked count: 4 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-11): State: WAITING Blocked count: 3 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-12): State: WAITING Blocked count: 3 Waited count: 87 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-13): State: WAITING Blocked count: 5 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-14): State: WAITING Blocked count: 6 Waited count: 84 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-15): State: WAITING Blocked count: 2 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-16): State: WAITING Blocked count: 3 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d3f513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 
(RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=40429): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@4a318be9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40429): State: WAITING Blocked count: 115 Waited count: 412 Waiting on java.util.concurrent.Semaphore$NonfairSync@77a164b4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40429): State: WAITING Blocked count: 107 Waited count: 454 Waiting on java.util.concurrent.Semaphore$NonfairSync@31e43ba0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 279 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40429): State: WAITING Blocked count: 31 Waited count: 7054 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70550f8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 
(RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40429): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@41adfd32 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40429): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@41adfd32 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=40429): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@3dd5a577 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=40429): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@2d7ca056 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) 
java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=40429): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@d3a791 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=40429): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@2e399ed4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 288 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3e577879 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 289 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 311 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 333 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 124 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 286 (M:0;407411ea95db:40429): State: TIMED_WAITING Blocked count: 12 Waited count: 2668 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.shutdown(AbstractFSWAL.java:1195) app//org.apache.hadoop.hbase.wal.AbstractFSWALProvider.shutdown0(AbstractFSWALProvider.java:162) app//org.apache.hadoop.hbase.wal.AbstractWALProvider$$Lambda$1345/0x00007f795d1f63e0.run(Unknown Source) app//org.apache.hadoop.hbase.wal.AbstractWALProvider.cleanup(AbstractWALProvider.java:287) app//org.apache.hadoop.hbase.wal.AbstractWALProvider.shutdown(AbstractWALProvider.java:299) 
app//org.apache.hadoop.hbase.wal.WALFactory.shutdown(WALFactory.java:341) app//org.apache.hadoop.hbase.master.region.MasterRegion.shutdownWAL(MasterRegion.java:140) app//org.apache.hadoop.hbase.master.region.MasterRegion.close(MasterRegion.java:206) app//org.apache.hadoop.hbase.master.HMaster.stopServiceThreads(HMaster.java:1819) app//org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:631) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 356 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 57 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 358 (master/407411ea95db:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 360 (master/407411ea95db:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 362 (org.apache.hadoop.hdfs.PeerCache@bc0f4d2): State: TIMED_WAITING Blocked count: 0 Waited count: 189 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 380 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 5648 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 397 
(MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 113 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 398 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 66 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 412 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 143 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@c2f16e1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 423 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 57 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 411 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 56446 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 432 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 40 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 433 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 13 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 458 (RegionServerTracker-0): State: WAITING Blocked count: 8 Waited count: 11 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7a5460f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 480 (regionserver/407411ea95db:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 27 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7f7bbb2f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 481 (regionserver/407411ea95db:0.procedureResultReporter): State: WAITING Blocked count: 21 Waited count: 41 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b97323f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 482 (regionserver/407411ea95db:0.procedureResultReporter): State: WAITING Blocked count: 9 Waited count: 17 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7a51d65 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 517 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 527 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 528 (region-location-0): State: WAITING Blocked count: 7 Waited count: 12 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2a9db6a0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 534 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 410 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 56285 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 541 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 543 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 7 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 584 (region-location-1): State: WAITING Blocked count: 6 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2a9db6a0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 585 (region-location-2): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2a9db6a0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 986 
(MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 417 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1050 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1080 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1094 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 60 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@154863ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1139 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1140 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1141 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1464 (Container metrics unregistration): State: WAITING Blocked count: 10 Waited count: 33 Waiting on java.util.TaskQueue@5227ef53 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 2897 (region-location-3): State: WAITING Blocked count: 2 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2a9db6a0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2898 (region-location-4): State: WAITING Blocked count: 2 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2a9db6a0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4615 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4616 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4617 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8192 (AsyncFSWAL-1-hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/MasterData-prefix:407411ea95db,40429,1732255320691): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@344c4232 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8196 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 26 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 8197 (process reaper): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8200 (WAL-Shutdown-0): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.awaitTermination(ThreadPoolExecutor.java:1464) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doShutdown(AbstractFSWAL.java:2117) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$2.call(AbstractFSWAL.java:1179) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$2.call(AbstractFSWAL.java:1174) java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8201 (Close-WAL-Writer-0): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:166) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1339/0x00007f795d1edcd0.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 2024-11-22T06:11:29,726 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-22T06:11:31,697 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=1 on file=hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/MasterData/WALs/407411ea95db,40429,1732255320691/407411ea95db%2C40429%2C1732255320691.1732255322313 after 4000ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T06:11:32,686 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.async.wait.on.shutdown.seconds" 2024-11-22T06:11:32,686 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-22T06:11:32,687 INFO [M:0;407411ea95db:40429 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-22T06:11:32,687 INFO [M:0;407411ea95db:40429 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40429 2024-11-22T06:11:32,687 INFO [M:0;407411ea95db:40429 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-22T06:11:32,700 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33847/user/jenkins/test-data/7afbc7aa-e4d6-2bbe-726a-95cd1dc41788/MasterData/WALs/407411ea95db,40429,1732255320691/407411ea95db%2C40429%2C1732255320691.1732255322313 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
12 more 2024-11-22T06:11:32,859 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T06:11:32,859 INFO [M:0;407411ea95db:40429 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-22T06:11:32,859 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40429-0x101626c19a90000, quorum=127.0.0.1:59716, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T06:11:32,867 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@174d7797{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T06:11:32,868 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5ed153c3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T06:11:32,868 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T06:11:32,868 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7e41eadb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T06:11:32,868 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4f667f57{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/hadoop.log.dir/,STOPPED} 2024-11-22T06:11:32,870 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
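The WAL-Shutdown-0 ERROR above names the setting "hbase.wal.async.wait.on.shutdown.seconds", which bounds how long AbstractFSWAL waits for the async writer to close during shutdown (5 seconds in this run before it gave up). The following is a minimal sketch, not taken from this log, of how that wait could be raised in a test Configuration; the class name and the 30-second value are illustrative assumptions, chosen only to exceed the 5-second wait observed here.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalShutdownWaitSketch {
  public static void main(String[] args) {
    // Start from the standard HBase configuration (hbase-site.xml on the classpath, if any).
    Configuration conf = HBaseConfiguration.create();
    // Key copied verbatim from the ERROR message; 30 is an arbitrary illustrative value.
    conf.setInt("hbase.wal.async.wait.on.shutdown.seconds", 30);
    // Confirm the override is visible to whatever starts the cluster/WAL with this conf.
    System.out.println("wal shutdown wait = "
        + conf.get("hbase.wal.async.wait.on.shutdown.seconds") + "s");
  }
}

In a minicluster test such as this one, the Configuration would typically be the instance handed to HBaseTestingUtil before the cluster is started; that wiring is outside what this log shows.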
2024-11-22T06:11:32,870 WARN [BP-1246063832-172.17.0.2-1732255315577 heartbeating to localhost/127.0.0.1:33847 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T06:11:32,870 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T06:11:32,870 WARN [BP-1246063832-172.17.0.2-1732255315577 heartbeating to localhost/127.0.0.1:33847 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1246063832-172.17.0.2-1732255315577 (Datanode Uuid 7b2f6a5a-1bea-4654-98f0-f374a7ca4605) service to localhost/127.0.0.1:33847 2024-11-22T06:11:32,872 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data5/current/BP-1246063832-172.17.0.2-1732255315577 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T06:11:32,873 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data6/current/BP-1246063832-172.17.0.2-1732255315577 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T06:11:32,874 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T06:11:32,879 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7193640b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T06:11:32,879 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7112b791{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T06:11:32,879 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T06:11:32,880 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1ed86ab3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T06:11:32,880 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1f60738e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/hadoop.log.dir/,STOPPED} 2024-11-22T06:11:32,881 WARN [BP-1246063832-172.17.0.2-1732255315577 heartbeating to localhost/127.0.0.1:33847 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T06:11:32,881 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-22T06:11:32,881 WARN [BP-1246063832-172.17.0.2-1732255315577 heartbeating to localhost/127.0.0.1:33847 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1246063832-172.17.0.2-1732255315577 (Datanode Uuid 5eafc69a-8a8f-4daa-847b-e5a5550eff99) service to localhost/127.0.0.1:33847
2024-11-22T06:11:32,881 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-22T06:11:32,882 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data3/current/BP-1246063832-172.17.0.2-1732255315577 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-22T06:11:32,882 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data4/current/BP-1246063832-172.17.0.2-1732255315577 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-22T06:11:32,882 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-22T06:11:32,884 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4177c147{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-22T06:11:32,884 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@f6e660c{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-22T06:11:32,884 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-22T06:11:32,884 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@134642c1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-22T06:11:32,884 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@18d9bb98{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/hadoop.log.dir/,STOPPED}
2024-11-22T06:11:32,885 WARN [BP-1246063832-172.17.0.2-1732255315577 heartbeating to localhost/127.0.0.1:33847 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-22T06:11:32,885 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-22T06:11:32,885 WARN [BP-1246063832-172.17.0.2-1732255315577 heartbeating to localhost/127.0.0.1:33847 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1246063832-172.17.0.2-1732255315577 (Datanode Uuid 5348112f-8cdf-4055-aedf-c9929e8181eb) service to localhost/127.0.0.1:33847
2024-11-22T06:11:32,885 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-22T06:11:32,886 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data1/current/BP-1246063832-172.17.0.2-1732255315577 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-22T06:11:32,886 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/cluster_ea8eaf7e-0e5f-1cf3-81e8-b755b5a6cc48/data/data2/current/BP-1246063832-172.17.0.2-1732255315577 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-22T06:11:32,886 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-22T06:11:32,892 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@a55f3e1{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-22T06:11:32,893 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@797ff5a9{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-22T06:11:32,893 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-22T06:11:32,893 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3d167fe8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-22T06:11:32,893 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@74e30e0c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/292f4186-8980-fa39-2aae-59874d6842be/hadoop.log.dir/,STOPPED}
2024-11-22T06:11:32,904 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-11-22T06:11:32,976 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
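Note: the teardown sequence above (stopping the Jetty datanode/hdfs web apps, ending the DataNode block-pool and command-processor threads, shutting down MiniZK, and finally "Minicluster is down") is the kind of output produced when a test shuts down an HBase mini cluster via HBaseTestingUtil, the class named in the last log entry. Below is a minimal Java sketch of such a lifecycle; HBaseTestingUtil and its startMiniCluster()/shutdownMiniCluster() calls are taken to exist as in the hbase-testing-util module, while the test class name, JUnit 4 annotations, and comments are illustrative assumptions, not something recovered from this log.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;

public class MiniClusterLifecycleSketch {

  // One shared testing utility for the whole test class (a common HBase test pattern).
  private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @BeforeClass
  public static void setUpCluster() throws Exception {
    // Starts an in-process ZooKeeper, HDFS and HBase mini cluster.
    // The run logged above used three datanodes (data1..data6, two dirs per
    // datanode); the exact overload/options it passed are not visible here.
    TEST_UTIL.startMiniCluster();
  }

  @Test
  public void testSomethingAgainstTheCluster() throws Exception {
    // Test body elided; tests operate against the mini cluster through TEST_UTIL.
  }

  @AfterClass
  public static void tearDownCluster() throws Exception {
    // Stops master and regionservers, the DataNode/NameNode web apps and
    // block-pool service threads, then the MiniZK cluster, emitting
    // "Stopped ...", "Ending block pool service ..." and "Minicluster is down"
    // entries like the ones in this log.
    TEST_UTIL.shutdownMiniCluster();
  }
}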